From e9db4ec4689650a51a694a54beebfdc68c7711a5 Mon Sep 17 00:00:00 2001 From: Vladislav Ermolaev <71265338+ermolaevv@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:02:25 +0300 Subject: [PATCH 001/155] =?UTF-8?q?=D0=95=D1=80=D0=BC=D0=BE=D0=BB=D0=B0?= =?UTF-8?q?=D0=B5=D0=B2=20=D0=92=D0=BB=D0=B0=D0=B4=D0=B8=D1=81=D0=BB=D0=B0?= =?UTF-8?q?=D0=B2.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2014.=20=D0=9C=D0=B8=D0=BD?= =?UTF-8?q?=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD?= =?UTF-8?q?=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC?= =?UTF-8?q?=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8?= =?UTF-8?q?=D1=86=D1=8B.=20(#7)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Описание последовательной задачи - последовательный обход элементов матрицы Описание MPI задачи - при считывании данных из taskData матрица преобразуется в развернутую (объединенную в один вектор) - развернутая матрица делится на сегменты по числу процессов - каждый сегмент данных отправляется своему процессу - каждый процесс находит минимальный элемент в сегменте данных - reduce проходит по всем процессам и выбирает минимальный из промежуточных результатов --------- Co-authored-by: Nesterov Alexander Co-authored-by: Nikita Korovin --- .../ermolaev_v_min_matrix/func_tests/main.cpp | 224 ++++++++++++++++++ .../ermolaev_v_min_matrix/include/ops_mpi.hpp | 48 ++++ .../ermolaev_v_min_matrix/perf_tests/main.cpp | 119 ++++++++++ .../mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp | 133 +++++++++++ .../ermolaev_v_min_matrix/func_tests/main.cpp | 159 +++++++++++++ .../ermolaev_v_min_matrix/include/ops_seq.hpp | 28 +++ .../ermolaev_v_min_matrix/perf_tests/main.cpp | 108 +++++++++ .../seq/ermolaev_v_min_matrix/src/ops_seq.cpp | 67 ++++++ 8 files changed, 886 insertions(+) create mode 100644 tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp create mode 100644 tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp b/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..9c1db410fc9 --- /dev/null +++ b/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp @@ -0,0 +1,224 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp" + +TEST(ermolaev_v_min_matrix_mpi, Test_Min_10x10) { + const int count_rows = 10; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + 
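// inputs_count holds {rows, columns}; both the sequential and the parallel task read these two entries to size the matrix +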
taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermolaev_v_min_matrix_mpi, Test_Min_10x100) { + const int count_rows = 10; + const int count_columns = 100; + const int gen_min = -500; + const int gen_max = 500; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermolaev_v_min_matrix_mpi, Test_Min_100x10) { + const int count_rows = 100; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + + boost::mpi::communicator world; + 
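// global_matrix is populated only on rank 0; the parallel task flattens it and hands each rank a contiguous segment in pre_processing() +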
std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermolaev_v_min_matrix_mpi, Test_Min_100x100) { + const int count_rows = 100; + const int count_columns = 100; + const int gen_min = -500; + const int gen_max = 500; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermolaev_v_min_matrix_mpi::TestMPITaskSequential 
testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} diff --git a/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp b/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..54e61fbfa67 --- /dev/null +++ b/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace ermolaev_v_min_matrix_mpi { + +std::vector getRandomVector(int sz, int min = 0, int max = 100); +std::vector> getRandomMatrix(int rows, int columns, int min = 0, int max = 100); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace ermolaev_v_min_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp b/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..c81a6b24e2e --- /dev/null +++ b/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp @@ -0,0 +1,119 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp" + +TEST(ermolaev_v_min_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 gen(dev()); + + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + 
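// boost::mpi::timer::elapsed() returns seconds since the timer's construction; the Perf harness samples this callback around each of the num_running runs +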
perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_min[0]); + } +} + +TEST(ermolaev_v_min_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_sum(1, INT_MAX); + int ref = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 gen(dev()); + + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_sum[0]); + } +} diff --git a/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp b/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..fda1b0bbc2d --- /dev/null +++ b/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp @@ -0,0 +1,133 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector ermolaev_v_min_matrix_mpi::getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> ermolaev_v_min_matrix_mpi::getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = ermolaev_v_min_matrix_mpi::getRandomVector(columns, min, max); + } + return vec; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + 
taskData->inputs_count[1], input_[i].begin()); + } + + // Init value for output + res_ = INT_MAX; + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + std::vector local_res(input_.size()); + + for (unsigned int i = 0; i < input_.size(); i++) { + local_res[i] = *std::min_element(input_[i].begin(), input_[i].end()); + } + + res_ = *std::min_element(local_res.begin(), local_res.end()); + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Init vectors + + unsigned int rows = taskData->inputs_count[0]; + unsigned int columns = taskData->inputs_count[1]; + input_ = std::vector(rows * columns); + + for (unsigned int i = 0; i < rows; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < columns; j++) { + input_[i * columns + j] = tmp_ptr[j]; + } + } + + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + delta * proc, delta); + } + } + + local_input_ = std::vector(delta); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + // Init value for output + res_ = INT_MAX; + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1 && !taskData->inputs.empty(); + } + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + int local_res = *std::min_element(local_input_.begin(), local_input_.end()); + reduce(world, local_res, res_, boost::mpi::minimum(), 0); + + return true; +} + +bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp b/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..8cc451adfcd --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp @@ -0,0 +1,159 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +TEST(ermolaev_v_min_matrix_seq, test_min_10x10) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 10; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // 
Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_10x100) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 10; + const int count_columns = 50; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_100x10) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 100; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_100x100) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 100; + const int count_columns = 100; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, 
count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} diff --git a/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp b/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..1077c2182db --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace ermolaev_v_min_matrix_seq { + +std::vector getRandomVector(int sz, int min = 0, int max = 100); +std::vector> getRandomMatrix(int rows, int columns, int min = 0, int max = 100); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace ermolaev_v_min_matrix_seq \ No newline at end of file diff --git a/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp b/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..76b96d0072e --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp @@ -0,0 +1,108 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +TEST(ermolaev_v_min_matrix_seq, test_pipeline_run) { + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataSeq->outputs_count.emplace_back(global_min.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = 
std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_min[0]); +} + +TEST(sequential_ermolaev_v_min_matrix_seq, test_task_run) { + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataSeq->outputs_count.emplace_back(global_min.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_min[0]); +} \ No newline at end of file diff --git a/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp b/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..57d1fba2b9e --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp @@ -0,0 +1,67 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +std::vector ermolaev_v_min_matrix_seq::getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> ermolaev_v_min_matrix_seq::getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + + for (int i = 0; i < rows; i++) { + vec[i] = ermolaev_v_min_matrix_seq::getRandomVector(columns, min, max); + } + return vec; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < 
taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + + // Init value for output + res_ = INT_MAX; + return true; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); i++) { + for (size_t j = 0; j < input_[i].size(); j++) { + if (input_[i][j] < res_) { + res_ = input_[i][j]; + } + } + } + return true; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From be9992c6ab35e60563e1490d2051e887e13e4306 Mon Sep 17 00:00:00 2001 From: "Michael K." <130953568+kmichaelk@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:04:50 +0300 Subject: [PATCH 002/155] =?UTF-8?q?=D0=9A=D1=80=D1=8B=D0=BB=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20=D0=97=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?= =?UTF-8?q?=D1=82=205.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD?= =?UTF-8?q?=D0=B8=D0=B5=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=87=D0=B5?= =?UTF-8?q?=D1=80=D0=B5=D0=B4=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B9=20=D0=B7?= =?UTF-8?q?=D0=BD=D0=B0=D0=BA=D0=BE=D0=B2=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D0=B9=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8?= =?UTF-8?q?=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0=20(#11)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Описание последовательной задачи.** Нахождение числа чередований знаков значений соседних элементов вектора происходит в один проход за линейное время, алгоритм проверки реализован как конечный автомат с двумя состояниями, соответствующими положительному и отрицательному знаковым диапазонам. В начале обработки текущего элемента состояние соответствует знаку предыдущего. **Описание MPI задачи**. На элементарном уровне алгоритм аналогичен алгоритму, применяемой в последовательной задаче. Каждому процессу рассылаются равномерные объемы данных при помощи `scatterv` с выравниванием в случае, если общий объем данных не делится на количество процессов нацело. Для избавления от необходимости дополнительных проверок чисел на стыке в координирующем процессе после редуцирования частичных результатов, в каждый процесс, за исключением последнего, присылается еще и первый подлежащий обработке процессом следующего ранга элемент путем сдвига границы блока памяти. Аналогичного результата без необходимости проверки в каждом процессе можно было бы добиться записью справа от блока входных данных числа такого же знака, как последнее входное, но это потребовало бы копирования входных данных во временную структуру из-за необходимости резервирования дополнительной памяти для хранения дополнительного элемента. 
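At the block level, every rank then runs the same two-state scan as the sequential version, only over its own segment (plus the one borrowed boundary element). A minimal framework-free sketch of that scan — the names here are illustrative and not taken from the patch itself:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Two-state automaton over one block: `neg` holds the sign range of the
// previous element; every state flip counts as one alternation.
template <typename T>
std::uint32_t count_sign_alternations(const std::vector<T>& block) {
  if (block.empty()) return 0;
  std::uint32_t count = 0;
  bool neg = block.front() < 0;
  for (std::size_t i = 1; i < block.size(); ++i) {
    const bool cur = block[i] < 0;
    if (cur != neg) {  // sign range changed between neighbours
      ++count;
      neg = cur;
    }
  }
  return count;
}
```

Because every rank except the last also receives the first element of the next rank's block, the pair straddling each block boundary is examined by exactly one rank, so summing the per-rank counts (the patch reduces them with `std::plus` via `boost::mpi::reduce`) yields the exact global total with no extra boundary handling.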
--------- Co-authored-by: Nesterov Alexander --- .../func_tests/main.cpp | 196 ++++++++++++++++ .../include/ops_mpi.hpp | 209 ++++++++++++++++++ .../perf_tests/main.cpp | 82 +++++++ .../src/ops_mpi.cpp | 1 + .../func_tests/main.cpp | 104 +++++++++ .../include/ops_seq.hpp | 77 +++++++ .../perf_tests/main.cpp | 76 +++++++ .../src/ops_seq.cpp | 1 + 8 files changed, 746 insertions(+) create mode 100644 tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp create mode 100644 tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp create mode 100644 tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp create mode 100644 tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..99e411dada6 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,196 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/ops_mpi.hpp" + +#define EXPAND(x) x + +#define T_DEF(macro, ...) \ + EXPAND(macro(int16_t, __VA_ARGS__)) \ + EXPAND(macro(int32_t, __VA_ARGS__)) \ + EXPAND(macro(int64_t, __VA_ARGS__)) \ + EXPAND(macro(float, __VA_ARGS__)) + +using CountType = uint32_t; + +class krylov_m_num_of_alternations_signs_mpi_test : public ::testing::Test { + protected: + template + void run_generic_test(const boost::mpi::communicator &world, const CountType count, std::vector &in, + const std::vector &shift_indices, CountType &out, + std::shared_ptr &taskDataPar) { + if (world.rank() == 0) { + in = std::vector(count); + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + // + krylov_m_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel( + taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + } + + template + std::vector get_random_vector(T size, T min, T max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_int_distribution<> distr(min, max); // inclusive + + std::vector v(size); + std::transform(v.cbegin(), v.cend(), v.begin(), [&](auto) { return distr(gen); }); + + return v; + } + + // + + template + void T_fails_validation() { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + krylov_m_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel( + taskDataPar); + + if (world.rank() == 0) { + taskDataPar->outputs_count.emplace_back(0); + EXPECT_FALSE(testMpiTaskParallel.validation()); + } else { + EXPECT_TRUE(testMpiTaskParallel.validation()); + } + } +}; + +// clang-format off +using PrecalcOpts = std::tuple< + CountType /* count */, + std::vector /* shift_indices */, + CountType /* num */ +>; 
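+// One case: input length, the indices whose iota-filled elements get negated, and the expected alternation count.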
+// clang-format on +class krylov_m_num_of_alternations_signs_mpi_test_precalc : public krylov_m_num_of_alternations_signs_mpi_test, + public ::testing::WithParamInterface { + protected: + template + void PT_yields_correct_result() { + boost::mpi::communicator world; + const auto &[count, shift_indices, num] = GetParam(); + + std::vector in; + CountType out = 0; + // + std::shared_ptr taskDataPar = std::make_shared(); + + run_generic_test(world, count, in, shift_indices, out, taskDataPar); + + if (world.rank() == 0) { + ASSERT_EQ(out, num); + } + } +}; + +class krylov_m_num_of_alternations_signs_mpi_test_random : public krylov_m_num_of_alternations_signs_mpi_test, + public ::testing::WithParamInterface { + protected: + template + void PT_yields_correct_result_random() { + boost::mpi::communicator world; + const auto count = GetParam(); + + std::vector in; + CountType out = 0; + std::vector shift_indices; + // + if (world.rank() == 0) { + const auto shift_indices_count = get_random_vector(1, 0, count - 1)[0]; + shift_indices = get_random_vector(shift_indices_count, 0, count - 1); + } + // + std::shared_ptr taskDataPar = std::make_shared(); + + run_generic_test(world, count, in, shift_indices, out, taskDataPar); + + if (world.rank() == 0) { + CountType reference_num = 0; + + // + std::shared_ptr taskDataSeq = std::make_shared(*taskDataPar); + taskDataSeq->outputs[0] = reinterpret_cast(&reference_num); + + // + krylov_m_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_TRUE(testMpiTaskSequential.validation()); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(out, reference_num); + } + } +}; + +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST(TypeParam, Fixture, TestName, ...) \ + TEST_P(Fixture, TestName##__##TypeParam) { PT_##TestName(__VA_ARGS__); } +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(Fixture, TestName, ...) \ + T_DEF(DECL_TYPE_VALUE_PARAMETRIZED_TEST, Fixture, TestName, __VA_ARGS__) + +#define DECL_TYPE_PARAMETRIZED_TEST(TypeParam, Fixture, TestName, ...) \ + TEST_F(Fixture, TestName##__##TypeParam) { T_##TestName(__VA_ARGS__); } +#define DECL_TYPE_PARAMETRIZED_TEST_ALL(Fixture, TestName, ...) \ + T_DEF(DECL_TYPE_PARAMETRIZED_TEST, Fixture, TestName, __VA_ARGS__) + +INSTANTIATE_TEST_SUITE_P( + krylov_m_num_of_alternations_signs_mpi_test, krylov_m_num_of_alternations_signs_mpi_test_precalc, + // clang-format off + ::testing::Values( + std::make_tuple(129, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(129, std::vector{0, /* . */}, 1), + std::make_tuple(129, std::vector{/* . */ 128}, 1), + std::make_tuple(129, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */}, 0), + std::make_tuple(128, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(128, std::vector{0, /* . */}, 1), + std::make_tuple(128, std::vector{/* . */ 127}, 1), + std::make_tuple(128, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */ 42, /* . */ 84, /* . */}, 4), + std::make_tuple(128, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */ 2 /* . */}, 2), + std::make_tuple(1, std::vector{/* . 
*/}, 0), + std::make_tuple(1, std::vector{0}, 0), + std::make_tuple(0, std::vector{/* . */}, 0) + ) + // clang-format on +); + +INSTANTIATE_TEST_SUITE_P(krylov_m_num_of_alternations_signs_mpi_test, + krylov_m_num_of_alternations_signs_mpi_test_random, + ::testing::Values(1, 2, 3, 4, 5, 128, 129)); + +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test_precalc, yields_correct_result); +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test_random, yields_correct_result_random); +DECL_TYPE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test, fails_validation); \ No newline at end of file diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..9513f6a0f15 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,209 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace krylov_m_num_of_alternations_signs_mpi { + +using namespace std::chrono_literals; + +template +class TestMPITaskParallel : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + static bool distribute(std::vector& distribution, std::vector& displacement, int amount, int world_size) { + const int average = amount / world_size; + if (average < world_size) { + distribution.resize(world_size, 0); + distribution[0] = amount; + displacement.resize(world_size, 0); + return false; + } + + distribution.resize(world_size, average); + displacement.resize(world_size); + + const int leftover = amount % world_size; + + int pos = 0; + for (int i = 0; i < world_size; i++) { + if (i < leftover) { + distribution[i]++; + } + displacement[i] = pos; + pos += distribution[i]; + } + + return true; + } + + static int calc_distribution(int world_rank, int amount, int world_size) { + const int average = amount / world_size; + const int leftover = amount % world_size; + if (average < world_size && world_rank != 0) { + return 0; + } + return average + ((world_rank < leftover) ? 
1 : 0); + } + + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + res = 0; + + unsigned int amount = 0; + if (world.rank() == 0) { + amount = taskData->inputs_count[0]; + } + boost::mpi::broadcast(world, amount, 0); + + if (world.rank() == 0) { + std::vector distribution; + std::vector displacement; + if (distribute(distribution, displacement, amount, world.size())) { + std::transform(distribution.cbegin(), distribution.cend() - 1, distribution.begin(), + [](auto x) { return x + 1; }); + } + + partial_input_.resize(distribution[0]); + + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + boost::mpi::scatterv(world, in_p, distribution, displacement, partial_input_.data(), distribution[0], 0); + } else { + int distribution = calc_distribution(world.rank(), amount, world.size()); + if (distribution > 0) { + if (world.rank() != world.size() - 1) { + distribution++; + } + partial_input_.resize(distribution); + boost::mpi::scatterv(world, partial_input_.data(), distribution, 0); + } + } + + return true; + } + + bool validation() override { + internal_order_test(); + + return world.rank() != 0 || (taskData->outputs_count[0] == 1); + } + + bool run() override { + internal_order_test(); + + CountType partial_res = 0; + + const std::size_t size = partial_input_.size(); + if (size > 0) { + bool neg = partial_input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = partial_input_[i] < 0; + if (neg == cur) { + continue; + } + partial_res++; + neg = cur; + } + } + + boost::mpi::reduce(world, partial_res, res, std::plus(), 0); + + return true; + } + + bool post_processing() override { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + + return true; + } + + private: + std::vector partial_input_{}; + CountType res{}; + boost::mpi::communicator world; +}; + +template +class TestMPITaskSequential : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + const auto count = taskData->inputs_count[0]; + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + input_.resize(count); + std::copy(in_p, in_p + count, std::begin(input_)); + // + res = 0; + + return true; + } + + bool validation() override { + internal_order_test(); + + return taskData->outputs_count[0] == 1; + } + + bool run() override { + internal_order_test(); + + const std::size_t size = input_.size(); + if (size > 1) { + bool neg = input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = input_[i] < 0; + if (neg == cur) { + continue; + } + res++; + neg = cur; + } + } + + std::this_thread::sleep_for(20ms); + + return true; + } + + bool post_processing() override { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + + return true; + } + + private: + std::vector input_{}; + CountType res{}; +}; + +} // namespace krylov_m_num_of_alternations_signs_mpi diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..13cc214c998 --- /dev/null +++ 
b/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,82 @@ +#include + +#include +#include + +#include "../include/ops_mpi.hpp" +#include "core/perf/include/perf.hpp" + +class krylov_m_num_of_alternations_signs_mpi_perf_test : public ::testing::Test { + using ElementType = int32_t; + using CountType = uint32_t; + // + const CountType in_count = 128; + const std::vector shift_indices{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}; + // + const CountType num = 7; + + protected: + void run_perf_test( + const std::function &perfAttr, + const std::shared_ptr &perfResults)> &runner) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + + // + std::vector in; + CountType out = 0; + if (world.rank() == 0) { + in = std::vector(in_count); + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + // + auto testMpiTaskParallel = + std::make_shared>( + taskDataPar); + + // + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + ppc::core::Perf perfAnalyzer(testMpiTaskParallel); + runner(perfAnalyzer, perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_EQ(num, out); + } + } +}; + +TEST_F(krylov_m_num_of_alternations_signs_mpi_perf_test, test_pipeline_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST_F(krylov_m_num_of_alternations_signs_mpi_perf_test, test_task_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..a9cf15bbf66 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "../include/ops_mpi.hpp" diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..4ef58303194 --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include +#include + +#include "../include/ops_seq.hpp" + +#define EXPAND(x) x + +#define T_DEF(macro, ...) 
\ + EXPAND(macro(int16_t, __VA_ARGS__)) \ + EXPAND(macro(int32_t, __VA_ARGS__)) \ + EXPAND(macro(int64_t, __VA_ARGS__)) \ + EXPAND(macro(float, __VA_ARGS__)) + +using CountType = uint32_t; + +// clang-format off +using PredefParam = std::tuple< + CountType /* count */, + std::vector /* shift_indices */, + CountType /* num */ +>; +// clang-format on + +class krylov_m_num_of_alternations_signs_seq_test : public ::testing::TestWithParam { + protected: + template + void PT_yields_correct_result() { + const auto &[count, shift_indices, num] = GetParam(); + + // + std::vector in(count); + CountType out = 0; + + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + // + krylov_m_num_of_alternations_signs_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(num, out); + } + + template + void T_fails_validation() { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->outputs_count.emplace_back(0); + + krylov_m_num_of_alternations_signs_seq::TestTaskSequential testTaskSequential(taskDataSeq); + EXPECT_FALSE(testTaskSequential.validation()); + } +}; + +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST(TypeParam, TestName) \ + TEST_P(krylov_m_num_of_alternations_signs_seq_test, TestName##__##TypeParam) { TestName(); } +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(TestName) T_DEF(DECL_TYPE_PARAMETRIZED_TEST, PT_##TestName) + +#define DECL_TYPE_PARAMETRIZED_TEST(TypeParam, TestName) \ + TEST_P(krylov_m_num_of_alternations_signs_seq_test, TestName##__##TypeParam) { TestName(); } +#define DECL_TYPE_PARAMETRIZED_TEST_ALL(TestName) T_DEF(DECL_TYPE_PARAMETRIZED_TEST, T_##TestName) + +INSTANTIATE_TEST_SUITE_P( + krylov_m_num_of_alternations_signs_seq_test, krylov_m_num_of_alternations_signs_seq_test, + // clang-format off + ::testing::Values( + std::make_tuple(129, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(129, std::vector{0, /* . */}, 1), + std::make_tuple(129, std::vector{/* . */ 128}, 1), + std::make_tuple(129, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */}, 0), + std::make_tuple(128, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(128, std::vector{0, /* . */}, 1), + std::make_tuple(128, std::vector{/* . */ 127}, 1), + std::make_tuple(128, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */ 42, /* . */ 84, /* . */}, 4), + std::make_tuple(128, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */ 2 /* . */}, 2), + std::make_tuple(1, std::vector{/* . */}, 0), + std::make_tuple(1, std::vector{0}, 0), + std::make_tuple(0, std::vector{/* . 
*/}, 0) + ) + // clang-format on +); + +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(yields_correct_result); +DECL_TYPE_PARAMETRIZED_TEST_ALL(fails_validation); diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp new file mode 100644 index 00000000000..d5adbd772af --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp @@ -0,0 +1,77 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace krylov_m_num_of_alternations_signs_seq { + +using namespace std::chrono_literals; + +template +class TestTaskSequential : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + const auto count = taskData->inputs_count[0]; + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + input_.resize(count); + std::copy(in_p, in_p + count, std::begin(input_)); + // + res = 0; + + return true; + } + + bool validation() override { + internal_order_test(); + + return taskData->outputs_count[0] == 1; + } + + bool run() override { + internal_order_test(); + + const std::size_t size = input_.size(); + if (size > 1) { + bool neg = input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = input_[i] < 0; + if (neg == cur) { + continue; + } + res++; + neg = cur; + } + } + + return true; + } + + bool post_processing() override { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + + return true; + } + + private: + std::vector input_{}; + CountType res{}; +}; + +} // namespace krylov_m_num_of_alternations_signs_seq diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..ca7c7a29f5e --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,76 @@ +#include + +#include +#include +#include + +#include "../include/ops_seq.hpp" +#include "core/perf/include/perf.hpp" + +class krylov_m_num_of_alternations_signs_seq_perf_test : public ::testing::Test { + using ElementType = int32_t; + using CountType = uint32_t; + // + const CountType in_count = 128; + const std::vector shift_indices{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . 
*/}; + // + const CountType num = 7; + + protected: + void run_perf_test( + const std::function &perfAttr, + const std::shared_ptr &perfResults)> &runner) { + // + std::vector in(in_count); + CountType out = 0; + + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + // + auto testTaskSequential = + std::make_shared>( + taskDataSeq); + + // + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + ppc::core::Perf perfAnalyzer(testTaskSequential); + runner(perfAnalyzer, perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(num, out); + } +}; + +TEST_F(krylov_m_num_of_alternations_signs_seq_perf_test, test_pipeline_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST_F(krylov_m_num_of_alternations_signs_seq_perf_test, test_task_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp new file mode 100644 index 00000000000..ab96e22e641 --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp @@ -0,0 +1 @@ +#include "../include/ops_seq.hpp" From 72deed355304a4968de7804901fa2955dccd36ea Mon Sep 17 00:00:00 2001 From: AndreySorokin7 <129724280+AndreySorokin7@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:16:57 +0300 Subject: [PATCH 003/155] =?UTF-8?q?=D0=A1=D0=BE=D1=80=D0=BE=D0=BA=D0=B8?= =?UTF-8?q?=D0=BD=20=D0=90=D0=BD=D0=B4=D1=80=D0=B5=D0=B9.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2026.=20=D0=9F=D1=80=D0=BE=D0=B2=D0=B5=D1=80=D0=BA?= =?UTF-8?q?=D0=B0=20=D0=BB=D0=B5=D0=BA=D1=81=D0=B8=D0=BA=D0=BE=D0=B3=D1=80?= =?UTF-8?q?=D0=B0=D1=84=D0=B8=D1=87=D0=B5=D1=81=D0=BA=D0=BE=D0=B9=20=D1=83?= =?UTF-8?q?=D0=BF=D0=BE=D1=80=D1=8F=D0=B4=D0=BE=D1=87=D0=B5=D0=BD=D0=BD?= =?UTF-8?q?=D0=BE=D1=81=D1=82=D0=B8=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81?= =?UTF-8?q?=D1=82=D1=80=D0=BE=D0=BA.=20(#21)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Описание последовательной задачи: Последовательный одновременный поэлементный проход по двум строкам с поиском первых двух различных символов с последующим определением большего из них. Отсюда выявлением большей строки с выводом 0 или 1. 0 - str1str2 Описание MPI задачи: Массив строк (std::vector>) разделяется на две отдельные строки. Каждая строка делится на равновеликие сегменты по числу процессов и каждый из полученных сегментов отправляется в свой процесс. 
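(Illustrative sketch of the scheme above, not the patch's exact code: the segment send/recv plumbing is elided, the names are placeholders, and it assumes <boost/mpi/collectives.hpp> and <vector> are included.)

    // Each process compares its own pair of segments; rank 0 then takes the
    // verdict of the lowest-ranked process that saw a difference.
    // Returns 0 if str1 < str2 at the first difference, 1 if str1 > str2,
    // 2 if no difference was found.
    int lexicographic_verdict(const boost::mpi::communicator& world,
                              const std::vector<char>& seg1,
                              const std::vector<char>& seg2) {
      int local = 2;
      for (std::size_t i = 0; i < seg1.size(); ++i) {
        if (seg1[i] != seg2[i]) {
          local = (seg1[i] < seg2[i]) ? 0 : 1;
          break;
        }
      }
      std::vector<int> all;
      boost::mpi::gather(world, local, all, 0);  // results arrive in rank order
      int res = 2;  // meaningful on rank 0 only
      for (int r : all) {
        if (r != 2) {
          res = r;  // the earliest differing segment decides the global order
          break;
        }
      }
      return res;
    }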
В каждом процессе происходит сравнение элементов, как это описано в последовательной задаче и на выходе выдают одно из трёх значений. 0 и 1 несут ту же суть, что и раньше, а значение 2 означает идентичность строк, работа с которыми осуществлялась внутри процессов. После завершения всех процессов происходит последовательный поиск по процессам. Значение первого процесса, значение которого не равно 2, записывается как результат и проход останавливается. Это необходимо, так нужно зафиксировать самое первое отличие глобальных строк. --------- Co-authored-by: AndreySorokin7 --- .../func_tests/main.cpp | 237 ++++++++++++++++++ .../include/ops_mpi.hpp | 46 ++++ .../perf_tests/main.cpp | 94 +++++++ .../src/ops_mpi.cpp | 128 ++++++++++ .../func_tests/main.cpp | 121 +++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 89 +++++++ .../src/ops_seq.cpp | 45 ++++ 8 files changed, 784 insertions(+) create mode 100644 tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp create mode 100644 tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp create mode 100644 tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp create mode 100644 tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp create mode 100644 tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp create mode 100644 tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp create mode 100644 tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp create mode 100644 tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp new file mode 100644 index 00000000000..b5345df0607 --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp @@ -0,0 +1,237 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_1_characters) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'p', 'p', 'p'}, {'b', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + 
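+  // Convention used throughout these tests: inputs_count[0] is the number
+  // of strings (always 2) and inputs_count[1] is their common length.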
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_1_characters_res1) { + boost::mpi::communicator world; + std::vector> strs = {{'c', 'p', 'p', 'p'}, {'b', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_3_characters_res1) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'a', 'p', 'p'}, {'a', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_4_characters) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'p', 'p', 'a'}, {'a', 'p', 'p', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, Equal_strings) { + boost::mpi::communicator world; + std::vector str1; + std::vector str2; + std::vector> strs = {str1, str2}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + 
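+    // Result encoding checked below: 0 means the first string is not greater,
+    // 1 means it is greater, and 2 (parallel task only) means no difference
+    // was found in any compared segment, as with these two equal empty strings.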
std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], 0); + ASSERT_EQ(2, res[0]); + } +} diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp new file mode 100644 index 00000000000..15ba3230fbe --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sorokin_a_check_lexicographic_order_of_strings_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input1_, local_input2_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace sorokin_a_check_lexicographic_order_of_strings_mpi diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..cd4fd75c53f --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp @@ -0,0 +1,94 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_20000000_characters) { + boost::mpi::communicator world; + std::vector str1(20000000, 'a'); + std::vector str2(19999999, 'a'); + str2.push_back('b'); + std::vector> strs = {str1, str2}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = + 
std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res[0], 0); + } +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_20000000_characters_res1) { + boost::mpi::communicator world; + std::vector str1(20000000, 'b'); + std::vector str2(19999999, 'b'); + str2.push_back('a'); + std::vector> strs = {str1, str2}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res[0], 1); + } +} diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp new file mode 100644 index 00000000000..7d509804204 --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp @@ -0,0 +1,128 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res_ = 0; + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool 
sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < std::min(input_[0].size(), input_[1].size()); ++i) { + if (static_cast(input_[0][i]) > static_cast(input_[1][i])) { + res_ = 1; + break; + } + if (static_cast(input_[0][i]) < static_cast(input_[1][i])) { + break; + } + } + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[1] / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_[0].data() + delta * proc, delta); + world.send(proc, 1, input_[1].data() + delta * proc, delta); + } + } + local_input1_ = std::vector(delta); + local_input2_ = std::vector(delta); + if (world.rank() == 0) { + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + delta); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + delta); + } else { + world.recv(0, 0, local_input1_.data(), delta); + world.recv(0, 1, local_input2_.data(), delta); + } + res_ = 2; + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int local_res = 2; + for (size_t i = 0; i < local_input1_.size(); ++i) { + if (static_cast(local_input1_[i]) > static_cast(local_input2_[i])) { + local_res = 1; + break; + } + if (static_cast(local_input1_[i]) < static_cast(local_input2_[i])) { + local_res = 0; + break; + } + } + std::vector all_res; + boost::mpi::gather(world, local_res, all_res, 0); + + if (world.rank() == 0) { + for (int result : all_res) { + if (result != 2) { + res_ = result; + break; + } + } + } + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp new file mode 100644 index 00000000000..c0c70b78826 --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp @@ -0,0 +1,121 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_3_characters) { + // Create data + std::vector> in = {{'a', 'b', 'c'}, {'a', 'b', 'd'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = 
std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_1_characters_res1) { + // Create data + std::vector> in = {{'f', 'p', 'p'}, {'a', 'p', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_2_characters_res1) { + // Create data + std::vector> in = {{'c', 'p', 'p'}, {'c', 'a', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_1_characters) { + // Create data + std::vector> in = {{'a', 'p', 'p'}, {'b', 'a', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, 
The_difference_is_in_3_characters_res1) { + // Create data + std::vector> in = {{'b', 'p', 'p'}, {'b', 'p', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp new file mode 100644 index 00000000000..8782efb0a9a --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sorokin_a_check_lexicographic_order_of_strings_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace sorokin_a_check_lexicographic_order_of_strings_seq \ No newline at end of file diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..0cd57c0110a --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_20000000_characters) { + // Create data + std::vector str1(20000000, 'a'); + std::vector str2(19999999, 'a'); + str2.push_back('b'); + std::vector> in = {str1, str2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = 
std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_20000000_characters_res1) { + // Create data + std::vector str1(20000000, 'b'); + std::vector str2(19999999, 'b'); + str2.push_back('a'); + std::vector> in = {str1, str2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, out[0]); +} diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp new file mode 100644 index 00000000000..7511ebfe6a5 --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp @@ -0,0 +1,45 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res_ = 0; + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < std::min(input_[0].size(), input_[1].size()); ++i) { + if (static_cast(input_[0][i]) > static_cast(input_[1][i])) { + res_ = 1; + break; + } + if (static_cast(input_[0][i]) < static_cast(input_[1][i])) { + break; + } + } + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From 5a24c2b05252fa0f6af71b822f56b737ad67bd63 Mon Sep 17 00:00:00 2001 From: Vladlen Korablev 
<112872588+korablev-vm@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:23:38 +0300 Subject: [PATCH 004/155] =?UTF-8?q?=D0=9A=D0=BE=D1=80=D0=B0=D0=B1=D0=BB?= =?UTF-8?q?=D0=B5=D0=B2=20=D0=92=D0=BB=D0=B0=D0=B4=D0=BB=D0=B5=D0=BD.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2019.=20=20=D0=98=D0=BD=D1=82=D0=B5?= =?UTF-8?q?=D0=B3=D1=80=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20?= =?UTF-8?q?=D0=BC=D0=B5=D1=82=D0=BE=D0=B4=D0=BE=D0=BC=20=D0=BF=D1=80=D1=8F?= =?UTF-8?q?=D0=BC=D0=BE=D1=83=D0=B3=D0=BE=D0=BB=D1=8C=D0=BD=D0=B8=D0=BA?= =?UTF-8?q?=D0=BE=D0=B2=20(#20)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SEQ: Реализован метод средних прямоугольников. 1) Исходный отрезок разбивается на n частей, каждая из которых имеет длину (a-b)/n 2) На каждом отрезке выбирается середина 3) Вычисление функции в выбранной точке (для каждого отрезка), умножение полученного результата на длину отрезка 4) Суммирование полученных значений Точность алгоритма прямо пропорциональна количеству разбиений (n) MPI: 1) Распределение данных с помощью broadcast - передача границ интегрирования и количества шагов всем процесса 2) Равномерное распределение интеграла примерно поровну (с учетом не целого деления) между процессами 3) Подсчет локального результата каждым процессом 4) reduce частичных результатов Для параллельного интегрирования реализован следующий метод распределения: size процессов, первый берет на себя 1, 1+size, 1+2size отрезки; и так работает с каждым. В итоге Все процессы получают равномерное (насколько это возможно) количество отрезков интегрирования. --- .../func_tests/main.cpp | 261 ++++++++++++++++++ .../include/ops_mpi.hpp | 62 +++++ .../perf_tests/main.cpp | 89 ++++++ .../korablev_v_rect_int_mpi/src/ops_mpi.cpp | 126 +++++++++ .../func_tests/main.cpp | 146 ++++++++++ .../include/ops_seq.hpp | 31 +++ .../perf_tests/main.cpp | 88 ++++++ .../korablev_v_rect_int_seq/src/ops_seq.cpp | 58 ++++ 8 files changed, 861 insertions(+) create mode 100644 tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp create mode 100644 tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp create mode 100644 tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp create mode 100644 tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp create mode 100644 tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp create mode 100644 tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp create mode 100644 tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp create mode 100644 tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp diff --git a/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp b/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp new file mode 100644 index 00000000000..5ccf20946d6 --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp @@ -0,0 +1,261 @@ +#define _USE_MATH_DEFINES +#include + +#include +#include +#include +#include + +#include "mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +TEST(korablev_v_rect_int, test_constant_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 10.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + 
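+    // The scalar inputs are passed by address, reinterpreted as uint8_t*:
+    // the two double bounds a and b above, then the subdivision count n.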
taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_square_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 5.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_sine_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr 
taskDataPar = std::make_shared(); + + double a = 0.0; + double b = M_PI; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_exponential_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 1.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return std::exp(x); 
}); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_remainder_case) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 5.0; + int n = 10; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} diff --git a/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp b/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp new file mode 100644 index 00000000000..086c55eac1c --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korablev_v_rect_int_mpi { + +class RectangularIntegrationSequential : public ppc::core::Task { + public: + explicit RectangularIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +class RectangularIntegrationParallel : public ppc::core::Task { + public: + explicit RectangularIntegrationParallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + double parallel_integrate(const 
std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double global_result_{}; + std::function func_; + + boost::mpi::communicator world; +}; + +} // namespace korablev_v_rect_int_mpi \ No newline at end of file diff --git a/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp b/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp new file mode 100644 index 00000000000..2c10b720b9f --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +TEST(korablev_v_rect_int, test_pipeline_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 1000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} + +TEST(korablev_v_rect_int, test_task_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 1000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} \ No newline at end of file diff --git a/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp b/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp new file mode 100644 index 00000000000..3416d8ed454 --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp @@ -0,0 +1,126 @@ +#include 
"mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::run() { + internal_order_test(); + result_ = integrate(func_, a_, b_, n_); + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +double korablev_v_rect_int_mpi::RectangularIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double h = (b - a) / n; + double sum = 0.0; + + for (int i = 0; i < n; ++i) { + double x = a + i * h; + sum += f(x) * h; + } + + return sum; +} + +void korablev_v_rect_int_mpi::RectangularIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + } + + broadcast(world, a_, 0); + broadcast(world, b_, 0); + broadcast(world, n_, 0); + + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::run() { + internal_order_test(); + double local_result_{}; + local_result_ = parallel_integrate(func_, a_, b_, n_); + reduce(world, local_result_, global_result_, std::plus<>(), 0); + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = global_result_; + } + return true; +} + +double korablev_v_rect_int_mpi::RectangularIntegrationParallel::parallel_integrate( + const std::function& f, double a, double b, int n) { + int rank = world.rank(); + int size = world.size(); + + double h = (b - a) / n; + double local_sum = 0.0; + + for (int i = rank; i < n; i += size) { + double x = a + i * h; + local_sum += f(x) * h; + } + + return local_sum; +} + +void korablev_v_rect_int_mpi::RectangularIntegrationParallel::set_function(const std::function& func) { + func_ = func; +} diff --git a/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp b/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp new file mode 100644 index 00000000000..4c60614be6a --- /dev/null +++ b/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp @@ -0,0 +1,146 @@ +#define _USE_MATH_DEFINES +#include + +#include +#include +#include + +#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp" + 
+TEST(korablev_v_rectangular_integration_seq, test_integration_x_squared) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + ASSERT_TRUE(testTaskSequential->validation()); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = 0.5; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x; }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_sin_x) { + const double a = 0.0; + const double b = M_PI; + const int n = 1000; + + const double expected_result = 2.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::sin(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_exp_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = std::exp(1.0) - 1.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::exp(x); 
+  testTaskSequential.set_function(func);
+
+  ASSERT_TRUE(testTaskSequential.validation());
+
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
+
+TEST(korablev_v_rectangular_integration_seq, test_set_function) {
+  std::vector<double> in = {0.0, 1.0, 1000};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential.set_function(func);
+
+  double x = 2.0;
+  double expected_result = 4.0;
+  ASSERT_EQ(func(x), expected_result);
+}
\ No newline at end of file
diff --git a/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp b/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp
new file mode 100644
index 00000000000..da77c9c628e
--- /dev/null
+++ b/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp
@@ -0,0 +1,31 @@
+#pragma once
+#include <functional>
+#include <memory>
+
+#include "core/task/include/task.hpp"
+
+namespace korablev_v_rect_int_seq {
+
+class RectangularIntegrationSequential : public ppc::core::Task {
+ public:
+  explicit RectangularIntegrationSequential(std::shared_ptr<ppc::core::TaskData> taskData_)
+      : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+  void set_function(const std::function<double(double)>& func);
+
+ private:
+  static double integrate(const std::function<double(double)>& f, double a, double b, int n);
+
+  double a_{};
+  double b_{};
+  int n_{};
+  double result_{};
+  std::function<double(double)> func_;
+};
+
+}  // namespace korablev_v_rect_int_seq
\ No newline at end of file
diff --git a/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp b/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp
new file mode 100644
index 00000000000..c2390fc5b41
--- /dev/null
+++ b/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp
@@ -0,0 +1,88 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp"
+
+TEST(korablev_v_rect_int_seq, test_pipeline_run) {
+  const double a = 0.0;
+  const double b = 1.0;
+  const int n = 10000;
+
+  const double expected_result = 1.0 / 3.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential = std::make_shared<korablev_v_rect_int_seq::RectangularIntegrationSequential>(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential->set_function(func);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+
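+  // A standalone sketch (hypothetical helper, not part of the perf harness) of
+  // the midpoint-rectangle rule that integrate() in src/ops_seq.cpp implements:
+  // sample every subinterval at its midpoint and weight by the interval width.
+  auto midpoint_sketch = [](const std::function<double(double)>& f, double lhs, double rhs, int steps) {
+    double h = (rhs - lhs) / steps;
+    double sum = 0.0;
+    for (int i = 0; i < steps; ++i) sum += f(lhs + (i + 0.5) * h);  // midpoint of subinterval i
+    return sum * h;
+  };
+  // For f(x) = x * x the rule's error is h * h / 12 with h = (b - a) / n, so
+  // n = 10000 lands far inside the 1e-3 tolerance asserted after the run.
+  ASSERT_NEAR(midpoint_sketch(func, a, b, n), expected_result, 1e-3);
+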
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
+
+TEST(korablev_v_rect_int_seq, test_task_run) {
+  const double a = 0.0;
+  const double b = 1.0;
+  const int n = 10000;
+  const double expected_result = 1.0 / 3.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential = std::make_shared<korablev_v_rect_int_seq::RectangularIntegrationSequential>(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential->set_function(func);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+
+  perfAnalyzer->task_run(perfAttr, perfResults);
+
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
diff --git a/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp b/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp
new file mode 100644
index 00000000000..e8d45c7853a
--- /dev/null
+++ b/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp
@@ -0,0 +1,58 @@
+#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp"
+
+#include <chrono>
+#include <functional>
+#include <thread>
+
+using namespace std::chrono_literals;
+
+bool korablev_v_rect_int_seq::RectangularIntegrationSequential::pre_processing() {
+  internal_order_test();
+
+  auto* inputs = reinterpret_cast<double*>(taskData->inputs[0]);
+
+  a_ = inputs[0];
+  b_ = inputs[1];
+  n_ = static_cast<int>(inputs[2]);
+
+  result_ = 0.0;
+  return true;
+}
+
+bool korablev_v_rect_int_seq::RectangularIntegrationSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1;
+}
+
+bool korablev_v_rect_int_seq::RectangularIntegrationSequential::run() {
+  internal_order_test();
+
+  result_ = integrate(func_, a_, b_, n_);
+
+  return true;
+}
+
+bool korablev_v_rect_int_seq::RectangularIntegrationSequential::post_processing() {
+  internal_order_test();
+
+  reinterpret_cast<double*>(taskData->outputs[0])[0] = result_;
+  return true;
+}
+
+double korablev_v_rect_int_seq::RectangularIntegrationSequential::integrate(const std::function<double(double)>& f,
+                                                                            double a, double b, int n) {
+  double step = (b - a) / n;
+  double area = 0.0;
+
+  for (int i = 0; i < n; ++i) {
+    double x = a + (i + 0.5) * step;
+    area += f(x) * step;
+  }
+
+  return area;
+}
+
+void korablev_v_rect_int_seq::RectangularIntegrationSequential::set_function(
+    const std::function<double(double)>& func) {
+  func_ = func;
+}

From 862e6d0c9b74cee4e11f4b619323faa6d88a6ba2 Mon Sep 17 00:00:00 2001
From: shvedovav <112872669+shvedovav@users.noreply.github.com>
Date: Fri, 25 Oct 2024 21:25:36 +0300
Subject: =?UTF-8?q?=D0=A8=D0=B2=D0=B5=D0=B4=D0=BE=D0=B2?=
 =?UTF-8?q?=D0=B0=20=D0=92=D0=B8=D1=82=D0=B0=D0=BB=D0=B8=D0=BD=D0=B0.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2023.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87?=
 =?UTF-8?q?=D0=B5=D1=82=20=D1=87=D0=B0=D1=81=D1=82=D0=BE=D1=82=D1=8B=20?=
 =?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=B0=20=D0=B2=20=D1=81?=
 =?UTF-8?q?=D1=82=D1=80=D0=BE=D0=BA=D0=B5=20(#24)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Sequential version:** the input is a string and a target character. The
count is computed with std::count, and the answer is a single integer: the
number of occurrences of the character in the string.

**Parallel version (MPI):** the string is distributed evenly across all
processes (the length and target character are shared via broadcast, the
string itself is scattered). Each process then counts the target character
in its part of the string, and the partial results are combined with reduce.

---
 .../shvedova_v_char_freq/func_tests/main.cpp  | 323 ++++++++++++++++++
 .../shvedova_v_char_freq/include/ops_mpi.hpp  |  53 +++
 .../shvedova_v_char_freq/perf_tests/main.cpp  |  89 +++++
 .../mpi/shvedova_v_char_freq/src/ops_mpi.cpp  | 110 ++++++
 .../shvedova_v_char_freq/func_tests/main.cpp  | 156 +++++++++
 .../shvedova_v_char_freq/include/ops_seq.hpp  |  24 ++
 .../shvedova_v_char_freq/perf_tests/main.cpp  |  81 +++++
 .../seq/shvedova_v_char_freq/src/ops_seq.cpp  |  32 ++
 8 files changed, 868 insertions(+)
 create mode 100644 tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp
 create mode 100644 tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp
 create mode 100644 tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp
 create mode 100644 tasks/seq/shvedova_v_char_freq/func_tests/main.cpp
 create mode 100644 tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp
 create mode 100644 tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp
 create mode 100644 tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp

diff --git a/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp b/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp
new file mode 100644
index 00000000000..ef0f1f421ff
--- /dev/null
+++ b/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp
@@ -0,0 +1,323 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp"
+
+TEST(shvedova_v_char_freq_mpi, test_all_same_characters) {
+  boost::mpi::communicator world;
+  std::vector<char> global_str;
+  std::vector<int> global_count(1, 0);
+  char target_char = 'a';
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_str = 240;
+    global_str = std::vector<char>(count_size_str, 'a');
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataPar->inputs_count.emplace_back(global_str.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&target_char));
+    taskDataPar->inputs_count.emplace_back(1);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_count.data()));
+    taskDataPar->outputs_count.emplace_back(global_count.size());
+  }
+
+  shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> reference_count(1, 0);
+
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataSeq->inputs_count.emplace_back(global_str.size());
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&target_char));
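+    // Every functional test in this file follows the same shape: run the
+    // parallel task on all ranks, then rebuild the TaskData on rank 0 only
+    // and let the sequential task produce the reference value to compare.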
taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_no_occurrences) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_mixed_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'b'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + for (int i = 0; i < count_size_str; i += 3) { + global_str[i] = 'b'; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
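+    // The stride-3 loop above marks indices 0, 3, ..., 237, so exactly 80 of
+    // the 240 characters are 'b'; both task variants must report that count.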
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_1) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(1, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_2) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
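+  // The length-1/2/3/5/7 tests exercise strings shorter than typical world
+  // sizes, i.e. the uneven-split path where some ranks receive zero or one
+  // character from the scatter.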
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_3) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_5) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'a', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 2); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_7) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'a', 'b', 'c', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 2); + } +} \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp b/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp new file mode 100644 index 00000000000..1bce11fc9ef --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp @@ -0,0 +1,53 @@ +#pragma once + 
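+// Parallel scheme (implemented in src/ops_mpi.cpp): rank 0 owns the input;
+// the length and target character are broadcast, the string is split with
+// scatterv, each rank counts its slice via std::count, and a plus-reduce
+// folds the per-rank counts back into rank 0's result.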
+#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace shvedova_v_char_freq_mpi { + +std::vector getRandomVector(int sz); + +class CharFrequencySequential : public ppc::core::Task { + public: + explicit CharFrequencySequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + char target_char_; + int res{}; +}; + +class CharFrequencyParallel : public ppc::core::Task { + public: + explicit CharFrequencyParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + std::vector local_input_; + char target_char_; + int res{}; + int local_res{}; + + boost::mpi::communicator world; +}; + +} // namespace shvedova_v_char_freq_mpi \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp b/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..5e25bffd1a0 --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp" + +TEST(shvedova_v_char_freq_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + 
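+    // How pre_processing() in src/ops_mpi.cpp splits n characters over
+    // world.size() ranks (an illustrative sketch, unused by the harness;
+    // assumes <vector> and <utility>): the first n % size ranks take one
+    // extra element, e.g. n = 7, size = 3 gives counts {3, 2, 2} and
+    // offsets {0, 3, 5} for scatterv.
+    auto split_sketch = [](int n, int size) {
+      std::vector<int> counts(size, n / size);
+      for (int i = 0; i < n % size; ++i) ++counts[i];  // spread the remainder
+      std::vector<int> offsets(size, 0);
+      for (int i = 1; i < size; ++i) offsets[i] = offsets[i - 1] + counts[i - 1];
+      return std::make_pair(counts, offsets);
+    };
+    (void)split_sketch;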
taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp b/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp new file mode 100644 index 00000000000..1cdb04cdf41 --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp @@ -0,0 +1,110 @@ +#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::pre_processing() { + internal_order_test(); + + input_str_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_str_[i] = tmp_ptr[i]; + } + + target_char_ = *reinterpret_cast(taskData->inputs[1]); + res = 0; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::run() { + internal_order_test(); + + res = std::count(input_str_.begin(), input_str_.end(), target_char_); + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::post_processing() { + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::pre_processing() { + internal_order_test(); + + int myid = world.rank(); + int world_size = world.size(); + unsigned int n = 0; + + if (myid == 0) { + n = taskData->inputs_count[0]; + input_str_ = std::vector(n); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + memcpy(input_str_.data(), tmp_ptr, sizeof(char) * n); + target_char_ = *reinterpret_cast(taskData->inputs[1]); + } + + boost::mpi::broadcast(world, n, 0); + boost::mpi::broadcast(world, target_char_, 0); + + unsigned int vec_send_size = n / world_size; + unsigned int overflow_size = n % world_size; + std::vector send_counts(world_size, vec_send_size); + std::vector displs(world_size, 0); + + for (unsigned int i = 0; i < static_cast(world_size); ++i) { + if (i < static_cast(overflow_size)) { + ++send_counts[i]; + } + if (i > 0) { + displs[i] = displs[i - 1] + send_counts[i - 1]; + } + } + + auto loc_vec_size = static_cast(send_counts[myid]); + local_input_.resize(loc_vec_size); + + boost::mpi::scatterv(world, input_str_.data(), send_counts, displs, local_input_.data(), loc_vec_size, 0); + + local_res = 0; + res = 0; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + 
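+  // Only rank 0 holds the output buffer, so only it can check outputs_count;
+  // every other rank reports success and relies on rank 0's verdict.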
return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::run() { + internal_order_test(); + local_res = std::count(local_input_.begin(), local_input_.end(), target_char_); + + boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + + return true; +} \ No newline at end of file diff --git a/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp b/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp new file mode 100644 index 00000000000..599cee6f272 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp @@ -0,0 +1,156 @@ +#include + +#include +#include + +#include "seq/shvedova_v_char_freq/include/ops_seq.hpp" + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_a_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'a'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_b_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'b'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_c_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'c'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + 
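+  // pre_processing() in src/ops_seq.cpp reads inputs[0] back as a std::string*,
+  // which is why each test wraps its string in a one-element vector: the vector
+  // supplies a stable object address to hand over as a raw uint8_t pointer.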
shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_x_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'x'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_a_in_long_string) { + std::string input_str(1000000, 'a'); + char target_char = 'a'; + int expected_frequency = 1000000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_in_empty_string) { + std::string input_str; + char target_char = 'a'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp b/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp new file mode 100644 index 00000000000..1d51823bbd6 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp @@ -0,0 +1,24 @@ 
+#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace shvedova_v_char_frequency_seq { + +class CharFrequencyTaskSequential : public ppc::core::Task { + public: + explicit CharFrequencyTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_str_; + char target_char_; + int frequency_ = 0; +}; + +} // namespace shvedova_v_char_frequency_seq diff --git a/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp b/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..877e0c430a1 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/shvedova_v_char_freq/include/ops_seq.hpp" + +TEST(shvedova_v_char_frequency_seq, test_pipeline_run) { + std::string input_str(100000, 'a'); + char target_char = 'a'; + int expected_frequency = 100000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_task_run) { + std::string input_str(100000, 'a'); + char target_char = 'a'; + int expected_frequency = 100000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->task_run(perfAttr, 
perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp b/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp new file mode 100644 index 00000000000..9b24750ee14 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp @@ -0,0 +1,32 @@ +#include "seq/shvedova_v_char_freq/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::pre_processing() { + internal_order_test(); + input_str_ = *reinterpret_cast(taskData->inputs[0]); + target_char_ = *reinterpret_cast(taskData->inputs[1]); + frequency_ = 0; + return true; +} + +bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1; +} + +bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::run() { + internal_order_test(); + frequency_ = std::count(input_str_.begin(), input_str_.end(), target_char_); + return true; +} + +bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = frequency_; + return true; +} \ No newline at end of file From e4692065ce3d5d6f00748d01f8cbdc0cf56d9728 Mon Sep 17 00:00:00 2001 From: "Michael K." <130953568+kmichaelk@users.noreply.github.com> Date: Sat, 26 Oct 2024 02:52:30 +0300 Subject: [PATCH 006/155] Remove sleep (#42) --- .../mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp index 9513f6a0f15..516dbd7b6a6 100644 --- a/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp @@ -188,8 +188,6 @@ class TestMPITaskSequential : public ppc::core::Task { } } - std::this_thread::sleep_for(20ms); - return true; } From 843b1161ddbd8b886e3a8ded1fe71e09bf9ce601 Mon Sep 17 00:00:00 2001 From: Alexey Chistov <112825972+Alexey2013@users.noreply.github.com> Date: Sat, 26 Oct 2024 03:02:47 +0300 Subject: [PATCH 007/155] =?UTF-8?q?=D0=A7=D0=B8=D1=81=D1=82=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B5=D0=B9.=D0=97=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?= =?UTF-8?q?=D1=82=2010.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D=D0=BB?= =?UTF-8?q?=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82?= =?UTF-8?q?=D1=80=D0=B8=D1=86=D1=8B=20(#13)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 255 ++++++++++++++++++ .../include/ops_mpi.hpp | 74 +++++ .../perf_tests/main.cpp | 89 ++++++ .../src/ops_mpi.cpp | 105 ++++++++ .../func_tests/main.cpp | 113 ++++++++ .../include/ops_seq.hpp | 48 ++++ .../perf_tests/main.cpp | 81 ++++++ .../src/ops_seq.cpp | 43 +++ 8 files changed, 808 insertions(+) create mode 100644 tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp create mode 100644 tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp create mode 100644 
tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp create mode 100644 tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp create mode 100644 tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp create mode 100644 tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..521e385b149 --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,255 @@ +#include + +#include +#include +#include + +#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp" + +TEST(chistov_a_sum_of_matrix_elements, test_wrong_validation_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(2, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int n = 3; + const int m = 4; + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel TestMPITaskParallel(taskDataPar); + ASSERT_EQ(TestMPITaskParallel.validation(), false); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_int_sum_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 3; + const int m = 4; + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + chistov_a_sum_of_matrix_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_double_sum_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 3; + const int m = 4; + + if (world.rank() == 0) { + global_matrix = 
chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + chistov_a_sum_of_matrix_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_sum[0], global_sum[0], 1e-6); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_with_empty_matrix_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 0; + const int m = 0; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_sum[0], 0); + } +} + +TEST(chistov_a_sum_of_matrix_elements, returns_empty_matrix_when_small_n_or_m_) { + auto matrix1 = chistov_a_sum_of_matrix_elements::get_random_matrix(0, 1); + EXPECT_TRUE(matrix1.empty()); + auto matrix2 = chistov_a_sum_of_matrix_elements::get_random_matrix(1, 0); + EXPECT_TRUE(matrix2.empty()); +} + +TEST(chistov_a_sum_of_matrix_elements, test_with_large_matrix_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + const int n = 1000; + const int m = 1000; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + 
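+  // Caveat on pre_processing() in src/ops_mpi.cpp: every rank receives exactly
+  // delta = (n * m) / world.size() elements, so any remainder elements are
+  // handed to no process and the parallel sum matches the reference only when
+  // world.size() divides n * m; a remainder-aware split (as in the
+  // scatterv-based tasks above) would close that gap. Separately, the
+  // std::accumulate calls use a literal 0 as the initial value, which fixes
+  // the accumulator type to int even when T is double; T{} would preserve T.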
ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, short_and_thick_test_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + const int n = 1000000; + const int m = 1; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, long_and_thin_test_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + const int n = 1; + const int m = 100000; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..0c36b36a86c --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp @@ -0,0 +1,74 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace chistov_a_sum_of_matrix_elements { +template +std::vector get_random_matrix(const int n, const int m) { + if (n <= 0 || m <= 0) { + return std::vector(); + } + + std::vector matrix(n * m); + for (int i = 0; i < n * m; ++i) { + matrix[i] = static_cast((std::rand() % 201) - 100); + } + return matrix; +} + +template +T classic_way(const std::vector matrix, const int 
n, const int m) { + T result = 0; + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + result += matrix[i * m + j]; + } + } + return result; +} + +template +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + T res{}; +}; + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + T res{}; + int n{}; + int m{}; + boost::mpi::communicator world; +}; + +} // namespace chistov_a_sum_of_matrix_elements diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d78e59e527d --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp" + +TEST(chistov_a_sum_of_matrix_elements, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + const int n = 4000; + const int m = 4000; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(n * m, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared>(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(std::accumulate(global_matrix.begin(), global_matrix.end(), 0), global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_task_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + const int n = 6000; + const int m = 6000; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(n * m, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto 
testMpiTaskParallel = std::make_shared>(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(std::accumulate(global_matrix.begin(), global_matrix.end(), 0), global_sum[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..b0341e12788 --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp @@ -0,0 +1,105 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp" + +namespace chistov_a_sum_of_matrix_elements { + +template +bool TestMPITaskSequential::pre_processing() { + internal_order_test(); + + T* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +template +bool TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +template +bool TestMPITaskSequential::run() { + internal_order_test(); + + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +template +bool TestMPITaskSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +template +bool TestMPITaskParallel::pre_processing() { + internal_order_test(); + + int delta = 0; + if (world.rank() == 0) { + n = static_cast(taskData->inputs_count[1]); + m = static_cast(taskData->inputs_count[2]); + delta = (n * m) / world.size(); + } + + boost::mpi::broadcast(world, delta, 0); + + if (world.rank() == 0) { + input_ = std::vector(n * m); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < static_cast(taskData->inputs_count[0]); i++) { + input_[i] = tmp_ptr[i]; + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + } + + local_input_ = std::vector(delta); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + return true; +} + +template +bool TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return (taskData->outputs_count[0] == 1 && !(taskData->inputs.empty())); + } + return true; +} + +template +bool TestMPITaskParallel::run() { + internal_order_test(); + + T local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); + reduce(world, local_res, res, std::plus(), 0); + + return true; +} + +template +bool TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} + +template class TestMPITaskSequential; +template class TestMPITaskSequential; +template class TestMPITaskParallel; +template class TestMPITaskParallel; + +} // namespace chistov_a_sum_of_matrix_elements diff --git 
a/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..c9413ee86a9 --- /dev/null +++ b/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,113 @@ +#include + +#include + +#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(chistov_a_sum_of_matrix_elements_seq, test_int_sum_sequential) { + const int n = 3; + const int m = 4; + std::vector global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(n, m); + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + chistov_a_sum_of_matrix_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + int sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m); + ASSERT_EQ(reference_sum[0], sum); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, test_double_sum_sequential) { + const int n = 3; + const int m = 4; + std::vector global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(n, m); + std::vector reference_sum(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + chistov_a_sum_of_matrix_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + double sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m); + + ASSERT_NEAR(reference_sum[0], sum, 1e-6); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, test_sum_with_empty_matrix_sequential) { + std::vector reference_sum(1, 0); + std::vector empty_matrix; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(empty_matrix.data())); + taskDataSeq->inputs_count.emplace_back(empty_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + chistov_a_sum_of_matrix_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], 0); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, test_sum_with_single_element_matrix_sequential) { + const int n = 1; + const int m = 1; + std::vector global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(n, m); + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + 
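+  // TaskData is deliberately type-erased: inputs/outputs hold uint8_t*
+  // pointers and inputs_count[0] carries the flattened element count, so
+  // pre_processing() has to cast the buffer back to the element type
+  // (int in this test).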
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + chistov_a_sum_of_matrix_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + int sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m); + ASSERT_EQ(reference_sum[0], sum); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, returns_empty_matrix_when_small_n_or_m_sequential) { + auto matrix1 = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(0, 1); + EXPECT_TRUE(matrix1.empty()); + auto matrix2 = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(1, 0); + EXPECT_TRUE(matrix2.empty()); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, test_wrong_validation_sequential) { + std::vector global_matrix; + std::vector global_sum(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + const int n = 3; + const int m = 4; + global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq(n, m); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataSeq->outputs_count.emplace_back(global_sum.size()); + chistov_a_sum_of_matrix_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), false); +} diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp b/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..60ae08fd01e --- /dev/null +++ b/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp @@ -0,0 +1,48 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace chistov_a_sum_of_matrix_elements_seq { + +template +std::vector get_random_matrix_seq(const int n, const int m) { + if (n <= 0 || m <= 0) { + return std::vector(); + } + + std::vector matrix(n * m); + for (int i = 0; i < n * m; ++i) { + matrix[i] = static_cast((std::rand() % 201) - 100); + } + return matrix; +} + +template +T classic_way_seq(const std::vector matrix, const int n, const int m) { + T result = 0; + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + result += matrix[i * m + j]; + } + } + return result; +} + +template +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + T res{}; +}; + +} // namespace chistov_a_sum_of_matrix_elements_seq diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..c0c75db9363 --- /dev/null +++ b/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(chistov_a_sum_of_matrix_elements_seq, test_pipeline_run_seq) { + const int n = 4000; + const int m = 3500; + + // Create data + std::vector in(n * m, 1); + std::vector out(1, 0); + + // Create TaskData + 
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} + +TEST(chistov_a_sum_of_matrix_elements_seq, test_task_run_seq) { + const int n = 6000; + const int m = 6000; + + std::vector in(n * m, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} \ No newline at end of file diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..6f2a540345b --- /dev/null +++ b/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp @@ -0,0 +1,43 @@ +#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp" + +namespace chistov_a_sum_of_matrix_elements_seq { + +template +bool TestTaskSequential::pre_processing() { + internal_order_test(); + + T* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +template +bool TestTaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == 1; +} + +template +bool TestTaskSequential::run() { + internal_order_test(); + + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +template +bool TestTaskSequential::post_processing() { + internal_order_test(); + + if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) { + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; + } + return false; +} + +template class TestTaskSequential; 
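+// Explicit instantiation of the second element type used by the tests: the
+// template member definitions live in this .cpp rather than the header, so
+// each instantiation must be spelled out here or the linker cannot resolve it.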
+template class TestTaskSequential; + +} // namespace chistov_a_sum_of_matrix_elements_seq From 2a47840a687ff3de1a3b09cb3d7daffc85894678 Mon Sep 17 00:00:00 2001 From: Nikita Korovin Date: Sat, 26 Oct 2024 03:03:25 +0300 Subject: [PATCH 008/155] =?UTF-8?q?=D0=9A=D0=BE=D1=80=D0=BE=D0=B2=D0=B8?= =?UTF-8?q?=D0=BD=20=D0=9D=D0=B8=D0=BA=D0=B8=D1=82=D0=B0.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2017.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0=D0=BB?= =?UTF-8?q?=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD?= =?UTF-8?q?=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D1=80=D0=BE=D0=BA?= =?UTF-8?q?=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#?= =?UTF-8?q?25)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 373 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 +++ .../perf_tests/main.cpp | 86 ++++ .../src/ops_mpi.cpp | 182 +++++++++ .../func_tests/main.cpp | 292 ++++++++++++++ .../include/ops_seq.hpp | 28 ++ .../perf_tests/main.cpp | 95 +++++ .../src/ops_seq.cpp | 77 ++++ 8 files changed, 1181 insertions(+) create mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp create mode 100644 tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..1d1f282d360 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,373 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_10x10_matrix) { + boost::mpi::communicator world; + const int count_rows = 10; + const int count_columns = 10; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); 
i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x100_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 100; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x500_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 500; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + 
testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_5000x5000_matrix) { + boost::mpi::communicator world; + const int count_rows = 5000; + const int count_columns = 5000; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_input_empty_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + 
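+    // validation() must fail here: the row pointers were never pushed into
+    // taskData->inputs, even though the counts and outputs are wired up.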
ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_output_empty_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_less_two_cols_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_0x10_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 0; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_10x10_cols_0_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(0); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, 
validation_fails_on_invalid_output_size) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows - 1, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp b/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..863a412fa35 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korovin_n_min_val_row_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); + static std::vector> generate_rnd_matrix(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + boost::mpi::communicator world; +}; + +} // namespace korovin_n_min_val_row_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..435f0ecba63 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp @@ -0,0 +1,86 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" + +TEST(korovin_n_min_val_row_matrix_mpi, test_pipeline_run_min) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows; + int count_columns; + + if (world.rank() == 0) { + count_rows = 5000; + count_columns = 5000; + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + global_min.resize(count_rows, INT_MAX); + + for (auto& row : global_matrix) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + 
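+    // count_rows and count_columns are initialized on rank 0 only;
+    // TestMPITaskParallel::pre_processing() broadcasts the dimensions to the
+    // remaining ranks before distributing the rows.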
taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_min.size(); ++i) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi_perf_test, test_task_run_min) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows; + int count_columns; + + if (world.rank() == 0) { + count_rows = 5000; + count_columns = 5000; + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + global_min.resize(count_rows, INT_MAX); + + for (auto& row : global_matrix) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_min.size(); ++i) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..832b8910d91 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp @@ -0,0 +1,182 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_matrix[j]; + } + } + res_.resize(rows); + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_.size(); i++) { + int min_val = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); j++) { + if (input_[i][j] < min_val) { + min_val = input_[i][j]; + } + } + res_[i] = min_val; + } + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + 
output_matrix[i] = res_[i]; + } + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + int rows = 0; + int cols = 0; + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + + broadcast(world, rows, 0); + broadcast(world, cols, 0); + + int delta = rows / world.size(); + int extra = rows % world.size(); + + if (world.rank() == 0) { + input_.resize(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + input_[i].assign(input_matrix, input_matrix + cols); + } + + for (int proc = 1; proc < world.size(); proc++) { + int start_row = proc * delta + std::min(proc, extra); + int num_rows = delta + (proc < extra ? 1 : 0); + for (int r = start_row; r < start_row + num_rows; r++) { + world.send(proc, 0, input_[r].data(), cols); + } + } + } + + int local_rows = delta + (world.rank() < extra ? 1 : 0); + + local_input_.resize(local_rows, std::vector(cols)); + + if (world.rank() == 0) { + std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin()); + } else { + for (int r = 0; r < local_rows; r++) { + world.recv(0, 0, local_input_[r].data(), cols); + } + } + + res_.resize(rows); + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); + } + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + std::vector local_mins(local_input_.size(), INT_MAX); + for (size_t i = 0; i < local_input_.size(); i++) { + for (const auto& val : local_input_[i]) { + local_mins[i] = std::min(local_mins[i], val); + } + } + + if (world.rank() == 0) { + int current_ind = 0; + std::copy(local_mins.begin(), local_mins.end(), res_.begin()); + current_ind += local_mins.size(); + for (int proc = 1; proc < world.size(); proc++) { + int loc_size; + world.recv(proc, 0, &loc_size, 1); + std::vector loc_res_(loc_size); + world.recv(proc, 0, loc_res_.data(), loc_size); + copy(loc_res_.begin(), loc_res_.end(), res_.data() + current_ind); + current_ind += loc_res_.size(); + } + } else { + int loc_res__size = (int)local_mins.size(); + world.send(0, 0, &loc_res__size, 1); + world.send(0, 0, local_mins.data(), loc_res__size); + } + return true; +} + +bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output_matrix); + } + + return true; +} + +std::vector korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_vector(int size, int lower_bound, + int upper_bound) { + std::vector v1(size); + for (auto& num : v1) { + num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return v1; +} + +std::vector> korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(int rows, + int cols) { + std::vector> matrix1(rows, std::vector(cols)); + for (auto& row : matrix1) { + row = generate_rnd_vector(cols, -1000, 1000); + int rnd_index = std::rand() % cols; + row[rnd_index] = INT_MIN; + } + return matrix1; +} diff --git 
a/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..e40afea053b --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,292 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_10x10_matrix) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x500_matrix) { + const int rows = 100; + const int cols = 500; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_5000x5000_matrix) { 
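+  // generate_rnd_matrix plants INT_MIN at a random index in every row, so
+  // each per-row minimum is exactly INT_MIN; that sentinel is what the
+  // assertions at the end of this test check.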
+ const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_input_empty_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_output_empty_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_cols_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + 
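+  // Only the column count is pushed into inputs_count below, so validation()
+  // must reject this TaskData: it requires at least two dimension entries.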
for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_0x10_matrix) { + const int rows = 0; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_10x10_cols_0_matrix) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(0); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_fails_on_invalid_output_size) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows - 1, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp b/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..49ce7c430a7 --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace korovin_n_min_val_row_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) { + std::srand(std::time(nullptr)); + } + bool pre_processing() override; + bool validation() override; + bool run() override; + bool 
post_processing() override; + static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); + static std::vector> generate_rnd_matrix(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace korovin_n_min_val_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..301f425150c --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp @@ -0,0 +1,95 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +TEST(korovin_n_min_val_row_matrix_seq, test_pipeline_run) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, test_task_run) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + 
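+  // print_perf_statistic reports the measured time and also enforces the
+  // framework's execution-time limits; the INT_MIN checks below confirm that
+  // the timed runs still produced correct row minima.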
ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp b/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..7d80ba6374c --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp @@ -0,0 +1,77 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_matrix[j]; + } + } + res_.resize(rows); + return true; +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_.size(); i++) { + int min_val = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); j++) { + if (input_[i][j] < min_val) { + min_val = input_[i][j]; + } + } + res_[i] = min_val; + } + return true; +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matrix[i] = res_[i]; + } + return true; +} + +std::vector korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_vector(int size, int lower_bound, + int upper_bound) { + std::vector v1(size); + for (auto& num : v1) { + num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return v1; +} + +std::vector> korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(int rows, + int cols) { + std::vector> matrix1(rows, std::vector(cols)); + for (auto& row : matrix1) { + row = generate_rnd_vector(cols, -1000, 1000); + int rnd_index = std::rand() % cols; + row[rnd_index] = INT_MIN; + } + return matrix1; +} \ No newline at end of file From 1685a7efdf581ec99b08505ebeedc590238a9b11 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Sun, 27 Oct 2024 09:46:56 +0100 Subject: [PATCH 009/155] Remove minimal time limit (#45) --- modules/core/perf/include/perf.hpp | 1 - modules/core/perf/src/perf.cpp | 6 +++--- tasks/mpi/example/src/ops_mpi.cpp | 1 - tasks/omp/example/src/ops_omp.cpp | 1 - tasks/seq/example/src/ops_seq.cpp | 1 - tasks/stl/example/src/ops_stl.cpp | 1 - tasks/tbb/example/src/ops_tbb.cpp | 1 - 7 files changed, 3 insertions(+), 9 deletions(-) diff --git a/modules/core/perf/include/perf.hpp b/modules/core/perf/include/perf.hpp index 1270aaac3a9..8ed56e7d767 100644 --- a/modules/core/perf/include/perf.hpp +++ b/modules/core/perf/include/perf.hpp @@ -24,7 +24,6 @@ struct PerfResults { double time_sec = 0.0; enum TypeOfRunning { PIPELINE, TASK_RUN, NONE } type_of_running = NONE; constexpr const static double MAX_TIME = 10.0; - constexpr const static double MIN_TIME = 0.05; }; 
class Perf { diff --git a/modules/core/perf/src/perf.cpp b/modules/core/perf/src/perf.cpp index 3f47feabb28..4d56b9946f9 100644 --- a/modules/core/perf/src/perf.cpp +++ b/modules/core/perf/src/perf.cpp @@ -78,14 +78,14 @@ void ppc::core::Perf::print_perf_statistic(const std::shared_ptr& p relative_path.erase(last_found_position, relative_path.length() - 1); std::stringstream perf_res_str; - if (time_secs > PerfResults::MIN_TIME && time_secs < PerfResults::MAX_TIME) { + if (time_secs < PerfResults::MAX_TIME) { perf_res_str << std::fixed << std::setprecision(10) << time_secs; } else { std::cerr << "Task execute time need to be: "; - std::cerr << PerfResults::MIN_TIME << " secs. < time < " << PerfResults::MAX_TIME << " secs." << std::endl; + std::cerr << " time < " << PerfResults::MAX_TIME << " secs." << std::endl; std::cerr << "Original time in secs: " << time_secs; perf_res_str << std::fixed << std::setprecision(10) << -1.0; - EXPECT_TRUE(time_secs > PerfResults::MIN_TIME && time_secs < PerfResults::MAX_TIME); + EXPECT_TRUE(time_secs < PerfResults::MAX_TIME); } std::cout << relative_path << ":" << type_test_name << ":" << perf_res_str.str() << std::endl; diff --git a/tasks/mpi/example/src/ops_mpi.cpp b/tasks/mpi/example/src/ops_mpi.cpp index 2066953d1e3..af08ff9fde2 100644 --- a/tasks/mpi/example/src/ops_mpi.cpp +++ b/tasks/mpi/example/src/ops_mpi.cpp @@ -112,7 +112,6 @@ bool nesterov_a_test_task_mpi::TestMPITaskParallel::run() { } else if (ops == "max") { reduce(world, local_res, res, boost::mpi::maximum(), 0); } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/omp/example/src/ops_omp.cpp b/tasks/omp/example/src/ops_omp.cpp index 77fae981e09..6fa84eb99bc 100644 --- a/tasks/omp/example/src/ops_omp.cpp +++ b/tasks/omp/example/src/ops_omp.cpp @@ -50,7 +50,6 @@ bool nesterov_a_test_task_omp::TestOMPTaskSequential::run() { } else if (ops == "*") { res = std::accumulate(input_.begin(), input_.end(), 1, std::multiplies<>()); } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/seq/example/src/ops_seq.cpp b/tasks/seq/example/src/ops_seq.cpp index c1a5d4997f7..085ae82f5bf 100644 --- a/tasks/seq/example/src/ops_seq.cpp +++ b/tasks/seq/example/src/ops_seq.cpp @@ -24,7 +24,6 @@ bool nesterov_a_test_task_seq::TestTaskSequential::run() { for (int i = 0; i < input_; i++) { res++; } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/stl/example/src/ops_stl.cpp b/tasks/stl/example/src/ops_stl.cpp index 8ba9fbb6652..1943cdf7c14 100644 --- a/tasks/stl/example/src/ops_stl.cpp +++ b/tasks/stl/example/src/ops_stl.cpp @@ -48,7 +48,6 @@ bool nesterov_a_test_task_stl::TestSTLTaskSequential::run() { } else if (ops == "-") { res -= std::accumulate(input_.begin(), input_.end(), 0); } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/tbb/example/src/ops_tbb.cpp b/tasks/tbb/example/src/ops_tbb.cpp index edb5f2a4978..2abe556fa88 100644 --- a/tasks/tbb/example/src/ops_tbb.cpp +++ b/tasks/tbb/example/src/ops_tbb.cpp @@ -50,7 +50,6 @@ bool nesterov_a_test_task_tbb::TestTBBTaskSequential::run() { } else if (ops == "*") { res = std::accumulate(input_.begin(), input_.end(), 1, std::multiplies<>()); } - std::this_thread::sleep_for(20ms); return true; } From 86c3393a764bc7e571953fff702364d8e26caee3 Mon Sep 17 00:00:00 2001 From: dima-dimka04 <57627727+dima-dimka04@users.noreply.github.com> Date: Sun, 27 Oct 2024 17:21:50 +0300 Subject: [PATCH 010/155] =?UTF-8?q?=D0=94=D1=80=D0=BE=D0=B6=D0=B4=D0=B8?= 
 =?UTF-8?q?=D0=BD=D0=BE=D0=B2=20=D0=94=D0=BC=D0=B8=D1=82=D1=80=D0=B8=D0=B9?=
 =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?=
 =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2012.=20=D0=A1=D1=83=D0=BC=D0=BC?=
 =?UTF-8?q?=D0=B0=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20?=
 =?UTF-8?q?=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE=D0=BB=D0=B1=D1=86=D0=B0=D0=BC?=
 =?UTF-8?q?=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#30)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Sequential task:** a nested loop walks over each column, sums its element
values, and writes the result into a vector.

**Parallel task:** the matrix is partitioned according to the number of
columns and processes; delta is computed as the number of columns each process
receives, together with the index of the last column each process handles. The
partitioned columns are summed with the same function used by the sequential
task, and the partial results are gathered into a common vector with gatherv.
---
 .../func_tests/main.cpp | 276 ++++++++++++++
 .../include/ops_mpi.hpp |  52 ++++
 .../perf_tests/main.cpp | 106 +++++++
 .../src/ops_mpi.cpp     | 134 +++++++++
 .../func_tests/main.cpp | 232 +++++++++
 .../include/ops_seq.hpp |  28 ++
 .../perf_tests/main.cpp |  96 ++++++
 .../src/ops_seq.cpp     |  55 ++++
 8 files changed, 979 insertions(+)
 create mode 100644 tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..e27d4e974be
--- /dev/null
+++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
@@ -0,0 +1,276 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp"
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, EmptyMatrixTest) {
+  boost::mpi::communicator world;
+
+  int cols = 0;
+  int rows = 0;
+
+  // Create data
+  std::vector<int> matrix = {};
+  std::vector<int> expres_par(cols, 0);
+  std::vector<int> ans = {};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
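+    // Cross-check pattern used by all tests in this file: rank 0 feeds the
+    // same inputs to the sequential task and compares the two results.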
taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres_seq.data())); + taskDataSeq->outputs_count.emplace_back(expres_seq.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(expres_par, expres_seq); + } +} + +TEST(drozhdinov_d_sum_cols_matrix_mpi, RandomMatrixTest) { + boost::mpi::communicator world; + + int cols = 200; + int rows = 500; + + // Create data + std::vector matrix = drozhdinov_d_sum_cols_matrix_mpi::getRandomVector(cols * rows); + std::vector expres_par(cols, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres_par.data())); + taskDataPar->outputs_count.emplace_back(expres_par.size()); + } + + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector expres_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres_seq.data())); + taskDataSeq->outputs_count.emplace_back(expres_seq.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(expres_par, expres_seq); + } +} + +TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest1) { + boost::mpi::communicator world; + + int cols = 2; + int rows = 2; + + // Create data + std::vector matrix = {1, 0, 2, 1}; + std::vector expres_par(cols, 0); + std::vector ans = {3, 1}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres_par.data())); + taskDataPar->outputs_count.emplace_back(expres_par.size()); + } + + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector 
expres_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres_seq.data())); + taskDataSeq->outputs_count.emplace_back(expres_seq.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(expres_par, expres_seq); + } +} + +TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest2) { + boost::mpi::communicator world; + + int cols = 1000; + int rows = 1000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres_par(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres_par.data())); + taskDataPar->outputs_count.emplace_back(expres_par.size()); + } + + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector expres_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres_seq.data())); + taskDataSeq->outputs_count.emplace_back(expres_seq.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(expres_par, expres_seq); + } +} + +TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest3) { + boost::mpi::communicator world; + + int cols = 2000; + int rows = 2000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres_par(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres_par.data())); + taskDataPar->outputs_count.emplace_back(expres_par.size()); + } + + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
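+  // The ppc::core::Task contract is driven in a fixed order in every test:
+  // validation() first, then pre_processing(), run() and post_processing().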
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector expres_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres_seq.data())); + taskDataSeq->outputs_count.emplace_back(expres_seq.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(expres_par, expres_seq); + } +} \ No newline at end of file diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..7f3076fecd4 --- /dev/null +++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp @@ -0,0 +1,52 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace drozhdinov_d_sum_cols_matrix_mpi { + +std::vector getRandomVector(int sz); +int makeLinCoords(int x, int y, int xSize); +std::vector calcMatSumSeq(const std::vector& matrix, int xSize, int ySize, int fromX, int toX); +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int cols{}; + int rows{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int cols{}; + int rows{}; + boost::mpi::communicator world; +}; + +} // namespace drozhdinov_d_sum_cols_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..1bafbed9243 --- /dev/null +++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +// drozhdinov_d_sum_cols_matrix perf +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp" + +TEST(drozhdinov_d_sum_cols_matrix, test_pipeline_run) { + boost::mpi::communicator world; + + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + 
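+  // Positional inputs_count convention used by this task (an observation, not
+  // an enforced API): [0] flattened element count, [1] column count, [2] row
+  // count; the task's pre_processing() reads them back by the same indices.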
taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataPar->outputs_count.emplace_back(expres.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); + } +} + +TEST(drozhdinov_d_sum_cols_matrix, test_task_run) { + boost::mpi::communicator world; + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataPar->outputs_count.emplace_back(expres.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); + } +} \ No newline at end of file diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..45ba245b78c --- /dev/null +++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp @@ -0,0 +1,134 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector drozhdinov_d_sum_cols_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = (gen() % 100) - 49; + } + return vec; +} + +int drozhdinov_d_sum_cols_matrix_mpi::makeLinCoords(int x, int y, int xSize) { return y * xSize + x; } + +std::vector drozhdinov_d_sum_cols_matrix_mpi::calcMatSumSeq(const std::vector& matrix, int xSize, int ySize, + int fromX, int toX) { + std::vector result; + for (int x = fromX; x < toX; x++) { + int columnSum = 
0;
+    for (int y = 0; y < ySize; y++) {
+      int linearizedCoordinate = makeLinCoords(x, y, xSize);
+      columnSum += matrix[linearizedCoordinate];
+    }
+    result.push_back(columnSum);
+  }
+  return result;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector(taskData->inputs_count[0]);
+  auto* ptr = reinterpret_cast(taskData->inputs[0]);
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = ptr[i];
+  }
+  cols = taskData->inputs_count[1];
+  rows = taskData->inputs_count[2];
+  res = std::vector(cols, 0);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[1] == taskData->outputs_count[0];
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  res = calcMatSumSeq(input_, cols, rows, 0, cols);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cols; i++) {
+    reinterpret_cast(taskData->outputs[0])[i] = res[i];
+  }
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[2];
+    cols = taskData->inputs_count[1];
+  }
+  broadcast(world, cols, 0);
+  broadcast(world, rows, 0);
+  // no per-process delta is needed here: the whole matrix is broadcast below
+  if (world.rank() == 0) {
+    // Init vectors
+    input_ = std::vector(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]);
+    for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+  } else {
+    input_ = std::vector(cols * rows);
+  }
+  broadcast(world, input_.data(), cols * rows, 0);
+  // Init value for output
+  res = std::vector(cols, 0);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->outputs_count[0] == taskData->inputs_count[1];
+  }
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  int delta = cols / world.size();
+  delta += (cols % world.size() == 0) ?
0 : 1; + int lastCol = std::min(cols, delta * (world.rank() + 1)); + auto localSum = calcMatSumSeq(input_, cols, rows, delta * world.rank(), lastCol); + localSum.resize(delta); + if (world.rank() == 0) { + std::vector localRes(cols + delta * world.size()); + std::vector sizes(world.size(), delta); + boost::mpi::gatherv(world, localSum.data(), localSum.size(), localRes.data(), sizes, 0); + localRes.resize(cols); + res = localRes; + } else { + boost::mpi::gatherv(world, localSum.data(), localSum.size(), 0); + } + return true; +} + +bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < cols; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..1b46665baa5 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp @@ -0,0 +1,232 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp" + +TEST(drozhdinov_d_sum_cols_matrix_seq, EmptyMatrixTest) { + int cols = 0; + int rows = 0; + + // Create data + std::vector matrix = {}; + std::vector expres; + std::vector ans = {}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests1) { + int cols = 2; + int rows = 2; + + // Create data + std::vector matrix = {1, 0, 2, 1}; + std::vector expres(cols, 0); + std::vector ans = {3, 1}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests2) { + int cols = 2000; + int rows = 2000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + 
taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests3) { + int cols = 3500; + int rows = 3500; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests1) { + int cols = 4; + int rows = 1; + + // Create data + std::vector matrix = {1, 0, 2, 1}; + std::vector expres(cols, 0); + std::vector ans = {1, 0, 2, 1}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests2) { + int cols = 1; + int rows = 100; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[0] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests3) { + int cols = 2000; + int rows = 1000; + + // Create data + std::vector matrix(cols * rows, 
0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, WrongValidationTest) { + int cols = 2; + int rows = 2; + + // Create data + std::vector matrix = {1, 0, 2, 1}; + std::vector expres(cols, 0); + std::vector ans = {3, 1}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(matrix.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..37626dc5ff3 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +int makeLinCoords(int x, int y, int xSize); +std::vector calcMatrixSumSeq(const std::vector& matrix, int xSize, int ySize, int fromX, int toX); +namespace drozhdinov_d_sum_cols_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int rows{}; + int cols{}; + std::vector input_; + std::vector res; +}; + +} // namespace drozhdinov_d_sum_cols_matrix_seq \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..2b5e0c2f808 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp @@ -0,0 +1,96 @@ +// Copyright 2023 Nesterov Alexander +// seq drozhdinov_d_sum_cols_matrix perf +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp" + +TEST(drozhdinov_d_sum_cols_matrix_seq, test_pipeline_run) { + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // 
Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, test_task_run) { + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); +} \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..fe7ca169fd0 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +int makeLinCoords(int x, int y, int xSize) { return y * xSize + x; } + +std::vector calcMatrixSumSeq(const std::vector& matrix, int xSize, int ySize, int fromX, int toX) { + std::vector result; + for (int x = fromX; x < toX; x++) { + int columnSum = 0; + for (int y = 0; y < ySize; y++) { + int linearizedCoordinate = makeLinCoords(x, y, xSize); + columnSum += 
matrix[linearizedCoordinate];
+    }
+    result.push_back(columnSum);
+  }
+  return result;
+}
+
+bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector(taskData->inputs_count[0]);
+  auto* ptr = reinterpret_cast(taskData->inputs[0]);
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = ptr[i];
+  }
+  cols = taskData->inputs_count[1];
+  rows = taskData->inputs_count[2];
+  res = std::vector(cols, 0);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[1] == taskData->outputs_count[0];
+}
+
+bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::run() {
+  internal_order_test();
+  res = calcMatrixSumSeq(input_, cols, rows, 0, cols);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cols; i++) {
+    reinterpret_cast(taskData->outputs[0])[i] = res[i];
+  }
+  return true;
+}

From 3bce1febe3cfc5ef8df65a2221d9075a6b82d2ac Mon Sep 17 00:00:00 2001
From: m1likus <113242173+m1likus@users.noreply.github.com>
Date: Sun, 27 Oct 2024 17:32:03 +0300
Subject: [PATCH 011/155] =?UTF-8?q?=D0=9A=D0=B0=D0=B1=D0=B0=D0=BB=D0=BE?=
 =?UTF-8?q?=D0=B2=D0=B0=20=D0=92=D0=B0=D0=BB=D0=B5=D1=80=D0=B8=D1=8F.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2022.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87?=
 =?UTF-8?q?=D0=B5=D1=82=20=D0=B1=D1=83=D0=BA=D0=B2=D0=B5=D0=BD=D0=BD=D1=8B?=
 =?UTF-8?q?=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE=D0=B2=20?=
 =?UTF-8?q?=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.=20(#40)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential implementation: walk along the string input_ and test every
character with isalpha(). If the character is an upper- or lowercase
letter, increment the counter by 1.

Parallel implementation: the string is divided among the processes into
substrings, and the letters of each substring are counted locally. The
catch with this approach is that str.size % num_threads is not always 0,
so for the last process we must compute the difference between str.size
and the combined length of all the other substrings, and subtract that
difference from its delta. For uniformity this is arranged so that every
process receives delta - 0 characters, while the last one receives
delta - difference. Strings shorter than the number of processes in use
are supported as well. The helpers getRandomNumber and getRandomString
generate strings of arbitrary length. The result of the MPI version is
checked against the result of the seq version; a sketch of the chunking
rule follows below.
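A minimal sketch of that chunking rule: chunk_bounds is a hypothetical helper
written for illustration only, not the committed code (the actual task derives
delta and a bufDelta correction inside pre_processing()), and it assumes
procs > 0.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>

// [begin, end) range of characters that process `rank` out of `procs` would
// receive for a string of length `len` under the ceil(len / procs) scheme.
// Trailing ranks may receive a shorter or even empty range, which is how a
// string shorter than the process count is handled.
std::pair<std::size_t, std::size_t> chunk_bounds(std::size_t len, std::size_t procs, std::size_t rank) {
  std::size_t delta = (len + procs - 1) / procs;  // round up
  std::size_t begin = std::min(len, rank * delta);
  std::size_t end = std::min(len, begin + delta);
  return {begin, end};
}

int main() {
  // A 10-character string over 4 processes: the ranks get 3, 3, 3 and 1 characters.
  for (std::size_t rank = 0; rank < 4; ++rank) {
    auto [begin, end] = chunk_bounds(10, 4, rank);
    std::cout << "rank " << rank << ": [" << begin << ", " << end << ")\n";
  }
}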
--- .../func_tests/main.cpp | 168 ++++++++++++++++++ .../include/count_symbols_mpi.hpp | 49 +++++ .../perf_tests/main.cpp | 90 ++++++++++ .../src/count_symbols_mpi.cpp | 142 +++++++++++++++ .../func_tests/main.cpp | 93 ++++++++++ .../include/count_symbols.hpp | 26 +++ .../perf_tests/main.cpp | 85 +++++++++ .../src/count_symbols.cpp | 44 +++++ 8 files changed, 697 insertions(+) create mode 100644 tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp create mode 100644 tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp create mode 100644 tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp create mode 100644 tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp create mode 100644 tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp create mode 100644 tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp create mode 100644 tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp create mode 100644 tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp diff --git a/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp b/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp new file mode 100644 index 00000000000..8a54212a5d0 --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp @@ -0,0 +1,168 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include +#include +#include + +#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp" + +TEST(kabalova_v_count_symbols_mpi, EmptyString) { + boost::mpi::communicator world; + std::string global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(0, global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, FourSymbolStringNotLetter) { + boost::mpi::communicator world; + std::string global_str = "1234"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + 
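+    // Rank 0 recomputes the answer with the sequential task on the same
+    // input string; the parallel and sequential results are compared below.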
// Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, FourSymbolStringLetter) { + boost::mpi::communicator world; + std::string global_str = "abcd"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, RandomString) { + boost::mpi::communicator world; + std::string global_str = kabalova_v_count_symbols_mpi::getRandomString(); + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} diff 
--git a/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp b/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp new file mode 100644 index 00000000000..298c037752a --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2024 Kabalova Valeria +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kabalova_v_count_symbols_mpi { + +int getRandomNumber(int left, int right); +std::string getRandomString(); +int countSymbols(std::string& str); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}, local_input_{}; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace kabalova_v_count_symbols_mpi \ No newline at end of file diff --git a/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp b/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp new file mode 100644 index 00000000000..ea37ba37357 --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp" + +TEST(kabalova_v_count_symbols_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string string = "string"; + std::string global_str; + for (int i = 0; i < 20000; i++) { + global_str += string; + } + + std::vector global_out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} + +TEST(kabalova_v_count_symbols_mpi, test_task_run) { + boost::mpi::communicator world; + std::string string = "string"; + std::string global_str; + for (int i = 0; i < 20000; i++) { + global_str += string; + } + + std::vector global_out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = 
std::make_shared();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data()));
+    taskDataPar->inputs_count.emplace_back(global_str.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared();
+  perfAttr->num_running = 5000;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp b/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp
new file mode 100644
index 00000000000..bfd4c62a79d
--- /dev/null
+++ b/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp
@@ -0,0 +1,142 @@
+// Copyright 2024 Kabalova Valeria
+#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp"
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+using namespace std::chrono_literals;
+
+int kabalova_v_count_symbols_mpi::getRandomNumber(int left, int right) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  return ((gen() % (right - left + 1)) + left);
+}
+
+std::string kabalova_v_count_symbols_mpi::getRandomString() {
+  std::string str;
+  std::string alphabet = "abcdefghijklmnopqrstuvwxyz1234567890";
+  int strSize = getRandomNumber(1000, 20000);
+  for (int i = 0; i < strSize; i++) {
+    str += alphabet[getRandomNumber(0, alphabet.size() - 1)];
+  }
+  return str;
+}
+
+int kabalova_v_count_symbols_mpi::countSymbols(std::string& str) {
+  int result = 0;
+  for (size_t i = 0; i < str.size(); i++) {
+    if (isalpha(str[i]) != 0) {
+      result++;
+    }
+  }
+  return result;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]);
+  result = 0;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // One input string; the output is a single number - the count of letter characters in the string.
+  bool flag1 = (taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1);
+  // Did we receive an array of chars?
+  bool flag2 = false;
+  if (typeid(*taskData->inputs[0]).name() == typeid(uint8_t).name()) {
+    flag2 = true;
+  }
+  return (flag1 && flag2);
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  result = countSymbols(input_);
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast(taskData->outputs[0])[0] = result;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  unsigned int delta = 0;
+  if (world.rank() == 0) {
+    // Get delta = string.size() / num_threads
+    delta = taskData->inputs_count[0] % world.size() == 0 ? taskData->inputs_count[0] / world.size()
+                                                          : taskData->inputs_count[0] / world.size() + 1;
+  }
+  broadcast(world, delta, 0);
+  // Initialize main string in root
+  // Then send substrings to processes
+  if (world.rank() == 0) {
+    input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]);
+    for (int proc = 1; proc < world.size(); proc++) {
+      // input_.size() / world.size() is not always an integer, so without a
+      // correction the last process could read past the end of the string.
+      // bufDelta is the (negative) overshoot of proc * delta + delta past
+      // input_.size(); adding it shrinks the last chunk to what is actually
+      // left, and ranks past the end of the string are meant to receive an
+      // empty substring.
+      int bufDelta = 0;
+      if ((size_t)(proc * delta + delta) > input_.size() && (size_t)proc < input_.size()) {
+        bufDelta = input_.size() - proc * delta - delta;
+      }
+      world.send(proc, 0, input_.data() + proc * delta, delta + bufDelta);
+    }
+  }
+  // Initialize substring in root
+  if (world.rank() == 0)
+    local_input_ = input_.substr(0, delta);
+  else {
+    std::string buffer;
+    buffer.resize(delta);
+    // Other processes get substrings from root
+    world.recv(0, 0, buffer.data(), delta);
+    local_input_ = std::string(buffer.data(), delta);
+  }
+  result = 0;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // 1 input string - 1 output number
+    bool flag1 = (taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1);
+    // Did we get array of chars?
+ bool flag2 = false; + if (typeid(*taskData->inputs[0]).name() == typeid(uint8_t).name()) { + flag2 = true; + } + return (flag1 && flag2); + } + return true; +} + +bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int local_result = 0; + // Count symbols in every substring + local_result = countSymbols(local_input_); + // Get sum and send it into result + reduce(world, local_result, result, std::plus(), 0); + return true; +} + +bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp b/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp new file mode 100644 index 00000000000..11843e79221 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include + +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +TEST(kabalova_v_count_symbols_seq, EmptyString) { + std::string str; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, OneSymbolStringNotLetter) { + std::string str = "1"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, OneSymbolStringLetter) { + std::string str = "a"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, string1) { + std::string str = "string;"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
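+  // "string;" contains six alphabetic characters; the trailing ';' is not a
+  // letter, so the expected count asserted below is 6.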
taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(6, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp b/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp new file mode 100644 index 00000000000..fe95ea8339a --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Kabalova Valeria +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kabalova_v_count_symbols_seq { + +int countSymbols(std::string& str); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +} // namespace kabalova_v_count_symbols_seq \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp b/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp new file mode 100644 index 00000000000..6fe51aa19a8 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +TEST(kabalova_v_count_symbols_seq_perf_test, test_pipeline_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(kabalova_v_count_symbols_seq_perf_test, test_task_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + 
taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp b/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp new file mode 100644 index 00000000000..3d07c9f2423 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp @@ -0,0 +1,44 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +int kabalova_v_count_symbols_seq::countSymbols(std::string& str) { + int result = 0; + for (size_t i = 0; i < str.size(); i++) { + if (isalpha(str[i]) != 0) { + result++; + } + } + return result; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + result = 0; + return true; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::validation() { + internal_order_test(); + // На выход подается 1 строка, на выходе только 1 число - число буквенных символов в строке. + return (taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1); +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::run() { + internal_order_test(); + result = countSymbols(input_); + return true; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result; + return true; +} From 56ee2b7057fd6a2019d2c53856792266337458d4 Mon Sep 17 00:00:00 2001 From: Kirill Khasanyanov <112872816+hasanyanovk@users.noreply.github.com> Date: Sun, 27 Oct 2024 17:44:25 +0300 Subject: [PATCH 012/155] =?UTF-8?q?=D0=A5=D0=B0=D1=81=D0=B0=D0=BD=D1=8C?= =?UTF-8?q?=D1=8F=D0=BD=D0=BE=D0=B2=20=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=202.=20=D0=A1=D1=80=D0=B5=D0=B4?= =?UTF-8?q?=D0=BD=D0=B5=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#29)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Описание последовательной задачи: 1. Алгоритм последовательно обходит вектор и считает сумму элементов 2. После чего делит сумму на количество элементов в векторе Описание параллельной задачи (MPI): 1. Входной массив данных и массив для записи результата помещаются root-процессом в структуру TaskData 2. 
---------

Co-authored-by: Michael K. <130953568+kmichaelk@users.noreply.github.com>
---
 .../func_tests/main.cpp | 126 +++++++++++
 .../include/avg_mpi.hpp | 205 ++++++++++++++++++
 .../perf_tests/main.cpp | 147 +++++++++++++
 .../src/avg_mpi.cpp     |   3 +
 .../func_tests/main.cpp |  54 +++++
 .../include/avg_seq.hpp |  96 ++++++++
 .../perf_tests/main.cpp |  68 ++++++
 .../src/avg_seq.cpp     |   3 +
 8 files changed, 702 insertions(+)
 create mode 100644 tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp
 create mode 100644 tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp
 create mode 100644 tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp
 create mode 100644 tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp
 create mode 100644 tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp
 create mode 100644 tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp
 create mode 100644 tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp
 create mode 100644 tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp

diff --git a/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp b/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp
new file mode 100644
index 00000000000..07d8f56e61f
--- /dev/null
+++ b/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp
@@ -0,0 +1,126 @@
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "../include/avg_mpi.hpp"
+#include "core/task/include/task.hpp"
+#include "gtest/gtest.h"
+
+//=========================================sequence=========================================
+
+#define FUNC_SEQ_TEST(InType, OutType, Size, Value)                                                     \
+                                                                                                        \
+  TEST(khasanyanov_k_average_vector_seq, test_seq_##InType##_##Size) {                                  \
+    std::vector<InType> in(Size, static_cast<InType>(Value));                                           \
+    std::vector<OutType> out(1, 0.0);                                                                   \
+    std::shared_ptr<ppc::core::TaskData> taskData =                                                     \
+        khasanyanov_k_average_vector_mpi::create_task_data<InType, OutType>(in, out);                   \
+    khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential<InType, OutType> testTask(taskData);   \
+    RUN_TASK(testTask);                                                                                 \
+    EXPECT_NEAR(out[0], static_cast<OutType>(Value), 1e-5);                                             \
+  }

+#define RUN_FUNC_SEQ_TESTS(Size, Value)          \
+  FUNC_SEQ_TEST(int8_t, double, Size, Value)     \
+  FUNC_SEQ_TEST(int16_t, double, Size, Value)    \
+  FUNC_SEQ_TEST(int32_t, double, Size, Value)    \
+  FUNC_SEQ_TEST(int64_t, double, Size, Value)    \
+  FUNC_SEQ_TEST(uint8_t, double, Size, Value)    \
+  FUNC_SEQ_TEST(uint16_t, double, Size, Value)   \
+  FUNC_SEQ_TEST(uint32_t, double, Size, Value)   \
+  FUNC_SEQ_TEST(uint64_t, double, Size, Value)   \
+  FUNC_SEQ_TEST(double, double, Size, Value)     \
+  FUNC_SEQ_TEST(float, double, Size, Value)
+
+TEST(khasanyanov_k_average_vector_seq, test_random) {
+  std::vector<double> in = khasanyanov_k_average_vector_mpi::get_random_vector<double>(15);
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskData =
+      khasanyanov_k_average_vector_mpi::create_task_data<double, double>(in, out);
+
+  khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential<double, double> testTask(taskData);
+  RUN_TASK(testTask);
+
+  double expect_res = std::accumulate(in.begin(), in.end(), 0.0, std::plus<double>()) / in.size();
+  EXPECT_NEAR(out[0],
expect_res, 1e-5); +} + +//=========================================parallel========================================= + +namespace mpi = boost::mpi; + +TEST(khasanyanov_k_average_vector_seq, test_displacement) { + auto displacement = khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::displacement(18, 4); + auto sizes = displacement.first; + auto displacements = displacement.second; + std::vector pattern_sizes{5, 5, 4, 4}; + std::vector pattern_displacements{0, 5, 10, 14}; + EXPECT_EQ(sizes, pattern_sizes); + EXPECT_EQ(displacements, pattern_displacements); +} + +TEST(khasanyanov_k_average_vector_mpi, test_wrong_input) { + mpi::communicator world; + std::vector in; + std::vector out; + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData = khasanyanov_k_average_vector_mpi::create_task_data(in, out); + } + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel testTask(taskData); + if (world.rank() == 0) { + ASSERT_FALSE(testTask.validation()); + } +} + +#define FUNC_MPI_TEST(InType, OutType, Size) \ + TEST(khasanyanov_k_average_vector_mpi, test_mpi_##InType##_##Size) { \ + mpi::communicator world; \ + std::vector in = khasanyanov_k_average_vector_mpi::get_random_vector(Size); \ + std::vector out(1, 0.0); \ + std::shared_ptr taskData = std::make_shared(); \ + if (world.rank() == 0) { \ + taskData = khasanyanov_k_average_vector_mpi::create_task_data(in, out); \ + } \ + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel testTask(taskData); \ + RUN_TASK(testTask); \ + if (world.rank() == 0) { \ + std::vector seq_out(1, 0.0); \ + std::shared_ptr taskDataSeq = \ + khasanyanov_k_average_vector_mpi::create_task_data(in, seq_out); \ + \ + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential testMpiTaskSequential( \ + taskDataSeq); \ + \ + RUN_TASK(testMpiTaskSequential); \ + EXPECT_NEAR(seq_out[0], out[0], 1e-5); \ + } \ + } + +#define RUN_FUNC_MPI_TESTS(Size) \ + FUNC_MPI_TEST(int8_t, double, Size) \ + FUNC_MPI_TEST(int16_t, double, Size) \ + FUNC_MPI_TEST(int32_t, double, Size) \ + FUNC_MPI_TEST(int64_t, double, Size) \ + FUNC_MPI_TEST(uint8_t, double, Size) \ + FUNC_MPI_TEST(uint16_t, double, Size) \ + FUNC_MPI_TEST(uint32_t, double, Size) \ + FUNC_MPI_TEST(uint64_t, double, Size) \ + FUNC_MPI_TEST(double, double, Size) \ + FUNC_MPI_TEST(float, double, Size) + +#define RUN_FUNC_TESTS(Size, Value) \ + RUN_FUNC_SEQ_TESTS(Size, Value) \ + RUN_FUNC_MPI_TESTS(Size) + +#define RUN_ALL_FUNC_TESTS() \ + RUN_FUNC_TESTS(1234, 7.7) \ + RUN_FUNC_TESTS(2000, 10) \ + RUN_FUNC_TESTS(9, 77) \ + RUN_FUNC_TESTS(3011, 111) \ + RUN_FUNC_TESTS(2, 23) + +//=======run============= +RUN_ALL_FUNC_TESTS() \ No newline at end of file diff --git a/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp b/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp new file mode 100644 index 00000000000..96ce6952257 --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp @@ -0,0 +1,205 @@ +#ifndef _AVG_MPI_HPP_ +#define _AVG_MPI_HPP_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +#ifndef RUN_TASK +#define RUN_TASK(task) \ + ASSERT_TRUE((task).validation()); \ + (task).pre_processing(); \ + (task).run(); \ + (task).post_processing(); + +#endif +namespace khasanyanov_k_average_vector_mpi { + +template +std::vector get_random_vector(size_t size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for 
(size_t i = 0; i < size; i++) { + vec[i] = static_cast(gen() % 1000 + (gen() % 100) / 100.0); + } + return vec; +} + +template +std::shared_ptr create_task_data(std::vector& in, std::vector& out) { + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + return taskData; +} + +//=========================================sequential========================================= + +template +class AvgVectorMPITaskSequential : public ppc::core::Task { + std::vector input_; + Out avg = 0.0; + + public: + explicit AvgVectorMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; +}; + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + avg = 0.0; + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::run() { + internal_order_test(); + avg = static_cast(std::accumulate(input_.begin(), input_.end(), 0.0, std::plus())); + avg /= static_cast(taskData->inputs_count[0]); + // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = avg; + return true; +} + +//=========================================parallel========================================= + +namespace mpi = boost::mpi; +template +class AvgVectorMPITaskParallel : public ppc::core::Task { + std::vector input_, local_input_; + Out avg = 0.0; + mpi::communicator world; + + public: + explicit AvgVectorMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::pair, std::vector> displacement(size_t, size_t); + static int size_for_rank(int, int, int); +}; + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; + } + return true; +} + +template +int khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::size_for_rank(int rank, int count, int size) { + int average = count / size; + int mod = count % size; + return average + ((rank < mod) ? 
1 : 0); +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::pre_processing() { + internal_order_test(); + size_t input_size; + if (world.rank() == 0) { + input_size = taskData->inputs_count[0]; + } + + mpi::broadcast(world, input_size, 0); + + if (world.rank() == 0) { + std::pair, std::vector> disp = displacement(input_size, world.size()); + auto& displacements = disp.second; + auto& sizes = disp.first; + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + + input_.clear(); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + + local_input_.resize(sizes[0]); + mpi::scatterv(world, input_, sizes, displacements, local_input_.data(), sizes[0], 0); + + } else { + auto size = size_for_rank(world.rank(), input_size, world.size()); + local_input_.resize(size); + mpi::scatterv(world, local_input_.data(), size, 0); + } + avg = 0.0; + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::run() { + internal_order_test(); + Out local_sum{}; + local_sum = static_cast(std::accumulate(local_input_.begin(), local_input_.end(), 0.0, std::plus())); + mpi::reduce(world, local_sum, avg, std::plus(), 0); + // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = avg / input_.size(); + } + return true; +} + +template +std::pair, std::vector> +khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::displacement(size_t input_size, size_t n) { + const size_t capacity = n; + size_t count = input_size / capacity; + size_t mod = input_size % capacity; + std::vector sizes(capacity, count); + std::transform(sizes.cbegin(), sizes.cbegin() + mod, sizes.begin(), [](auto i) { return i + 1; }); + std::vector disp(capacity); + disp[0] = 0; + std::generate(disp.begin() + 1, disp.end(), [&, i = 0]() mutable { + ++i; + return disp[i - 1] + sizes[i - 1]; + }); + + return {sizes, disp}; +} + +} // namespace khasanyanov_k_average_vector_mpi + +#endif // !_AVG_MPI_HPP_ diff --git a/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp b/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp new file mode 100644 index 00000000000..25e167b5024 --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp @@ -0,0 +1,147 @@ +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp" + +//=========================================sequence========================================= + +const int SIZE = 2220000; + +TEST(khasanyanov_k_average_vector_seq, test_pipeline_run) { + std::vector global_vec(SIZE, 4); + std::vector average(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = 
std::make_shared(testAvgVectorSequence); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(4, average[0], 1e-5); +} + +TEST(khasanyanov_k_average_vector_seq, test_task_run) { + std::vector global_vec(SIZE, 4); + std::vector average(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testAvgVectorSequence); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(4, average[0], 1e-5); +} + +//=========================================parallel========================================= + +TEST(khasanyanov_k_average_vector_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector average_par(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = khasanyanov_k_average_vector_mpi::get_random_vector(SIZE); + taskDataPar = khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_par); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + RUN_TASK(*testMpiTaskParallel); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector average_seq(1, 0.0); + std::shared_ptr taskDataSeq = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_seq); + auto testMpiTaskSequential = + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential(taskDataSeq); + RUN_TASK(testMpiTaskSequential); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(average_seq[0], average_par[0], 1e-5); + } +} + +TEST(khasanyanov_k_average_vector_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector average_par(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = khasanyanov_k_average_vector_mpi::get_random_vector(SIZE); + taskDataPar = khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_par); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + RUN_TASK(*testMpiTaskParallel); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector average_seq(1, 0.0); + std::shared_ptr taskDataSeq = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_seq); + auto testMpiTaskSequential = 
+ khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential(taskDataSeq); + RUN_TASK(testMpiTaskSequential); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(average_seq[0], average_par[0], 1e-5); + } +} \ No newline at end of file diff --git a/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp b/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp new file mode 100644 index 00000000000..1e73d54ad1a --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp @@ -0,0 +1,3 @@ +#include "mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp" + +/* nothing to realization*/ diff --git a/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp b/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp new file mode 100644 index 00000000000..760942f7ca7 --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +#include "../include/avg_seq.hpp" +#include "core/task/include/task.hpp" +#include "gtest/gtest.h" + +#define FUNC_SEQ_TEST(InType, OutType, Size, Value) \ + \ + TEST(khasanyanov_k_average_vector_seq, test_seq_##InType##_##Size) { \ + std::vector in(Size, static_cast(Value)); \ + std::vector out(1, 0.0); \ + std::shared_ptr taskData = \ + khasanyanov_k_average_vector_seq::create_task_data(in, out); \ + khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential testTask(taskData); \ + RUN_TASK(testTask); \ + EXPECT_NEAR(out[0], static_cast(Value), 1e-5); \ + } + +#define RUN_FUNC_SEQ_TESTS(Size, Value) \ + FUNC_SEQ_TEST(int8_t, double, Size, Value) \ + FUNC_SEQ_TEST(int16_t, double, Size, Value) \ + FUNC_SEQ_TEST(int32_t, double, Size, Value) \ + FUNC_SEQ_TEST(int64_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint8_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint16_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint32_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint64_t, double, Size, Value) \ + FUNC_SEQ_TEST(double, double, Size, Value) \ + FUNC_SEQ_TEST(float, double, Size, Value) + +TEST(khasanyanov_k_average_vector_seq, test_random) { + std::vector in = khasanyanov_k_average_vector_seq::get_random_vector(15); + std::vector out(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_seq::create_task_data(in, out); + + khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential testTask(taskData); + RUN_TASK(testTask); + + double expect_res = std::accumulate(in.begin(), in.end(), 0.0, std::plus()) / in.size(); + EXPECT_NEAR(out[0], expect_res, 1e-5); +} + +#define RUN_ALL_FUNC_TESTS() \ + RUN_FUNC_SEQ_TESTS(1234, 7.7) \ + RUN_FUNC_SEQ_TESTS(2000, 10) \ + RUN_FUNC_SEQ_TESTS(9, 77) \ + RUN_FUNC_SEQ_TESTS(3011, 111) + +RUN_ALL_FUNC_TESTS() \ No newline at end of file diff --git a/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp b/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp new file mode 100644 index 00000000000..be6fa872032 --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp @@ -0,0 +1,96 @@ +#ifndef _AVG_SEQ_HPP_ +#define _AVG_SEQ_HPP_ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +#ifndef RUN_TASK +#define RUN_TASK(task) \ + ASSERT_TRUE((task).validation()); \ + (task).pre_processing(); \ + (task).run(); \ + (task).post_processing(); + +#endif +namespace khasanyanov_k_average_vector_seq { + +template +std::vector get_random_vector(size_t size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for 
(size_t i = 0; i < size; i++) { + vec[i] = static_cast(gen() % 1000 + (gen() % 100) / 100.0); + } + return vec; +} + +template +std::shared_ptr create_task_data(std::vector& in, std::vector& out) { + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + return taskData; +} + +//=========================================sequential========================================= + +template +class AvgVectorSEQTaskSequential : public ppc::core::Task { + std::vector input_; + Out avg = 0.0; + + public: + explicit AvgVectorSEQTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; +}; + +template +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; +} + +template +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + avg = 0.0; + return true; +} + +template +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential::run() { + internal_order_test(); + avg = static_cast(std::accumulate(input_.begin(), input_.end(), 0.0, std::plus())); + avg /= static_cast(taskData->inputs_count[0]); + // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = avg; + return true; +} + +} // namespace khasanyanov_k_average_vector_seq + +#endif // !_AVG_MPI_HPP_ diff --git a/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp b/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp new file mode 100644 index 00000000000..7d798b0f8cb --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp @@ -0,0 +1,68 @@ +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/khasanyanov_k_average_vector/include/avg_seq.hpp" + +//=========================================sequence========================================= + +const int SIZE = 1220000; + +TEST(khasanyanov_k_average_vector_seq, test_pipeline_run) { + std::vector global_vec(SIZE, 4); + std::vector average(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_seq::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testAvgVectorSequence); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4, average[0]); +} 
+
+TEST(khasanyanov_k_average_vector_seq, test_task_run) {
+  std::vector<double> global_vec(SIZE, 4);
+  std::vector<double> average(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskData =
+      khasanyanov_k_average_vector_seq::create_task_data<double, double>(global_vec, average);
+
+  auto testAvgVectorSequence =
+      std::make_shared<khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<double, double>>(taskData);
+
+  RUN_TASK(*testAvgVectorSequence);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testAvgVectorSequence);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(4, average[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp b/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp
new file mode 100644
index 00000000000..0569ed917a8
--- /dev/null
+++ b/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp
@@ -0,0 +1,3 @@
+#include "seq/khasanyanov_k_average_vector/include/avg_seq.hpp"
+
+/* nothing to implement */

From 6eccf1b890052aea84d36073a1cda6110bedc178 Mon Sep 17 00:00:00 2001
From: ascannel <113050263+ascannel@users.noreply.github.com>
Date: Sun, 27 Oct 2024 17:54:09 +0300
Subject: [PATCH 013/155] Lopatin Ilya. Task 1. Variant 24. Counting the number
 of words in a string. (#35)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This program counts the number of words in a long text. Two variants of the
solution are implemented: sequential (SEQ) and parallel (MPI).

**Sequential variant (SEQ)**

In the sequential variant the program uses the run() function of the
TestMPITaskSequential class, which walks over the character array input_ and
counts the words. The function works as follows:
- The wordCount variable is initialized to zero and holds the number of words.
- The function walks over the character array input_ and checks whether each
character is a space. Once the spaces are counted, one is added to that total,
since the number of words equals the number of spaces between them + 1 (see the
sketch after this description).

**Parallel variant (MPI)**

In the parallel variant the program uses the run() function of the
TestMPITaskParallel class, which splits the character array into several parts
and distributes them among the processes. It works as follows:
- The pre_processing() function of the TestMPITaskParallel class initializes
the localInput_ and localSpaceCount variables and distributes the character
array among the processes. The process with rank 0 sends parts of the
character array to the other processes.
- The run() function of the TestMPITaskParallel class walks over its own part
of the character array and counts the words with the same algorithm as the
sequential variant; the per-process space counts are then combined on rank 0
with reduce.
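For illustration, a minimal standalone sketch of the counting rule described
above (plain C++, no MPI; count_words is an illustrative name, not the task's
API), assuming words are separated by single spaces as in the test data:

#include <iostream>
#include <string>

int count_words(const std::string& text) {
  if (text.empty()) {
    return 0;  // the tasks reject empty input in validation() instead
  }
  int spaces = 0;
  for (char c : text) {
    if (c == ' ') {
      spaces++;
    }
  }
  return spaces + 1;  // words = spaces between them + 1
}

int main() {
  std::cout << count_words("three funny words") << "\n";  // prints 3
  return 0;
}

The parallel variant applies the same loop to each process's chunk, sums the
per-process space counts with reduce, and only then adds the final + 1 on
rank 0.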
--- .../func_tests/countWordsFuncTests.cpp | 181 ++++++++++++++++++ .../include/countWordsMPIHeader.hpp | 47 +++++ .../perf_tests/countWordsPerfTests.cpp | 72 +++++++ .../src/countWordsMPI.cpp | 100 ++++++++++ .../func_tests/countWordsFuncTests.cpp | 97 ++++++++++ .../include/countWordsSeqHeader.hpp | 27 +++ .../perf_tests/countWordsPerfTests.cpp | 66 +++++++ .../src/countWordsSeq.cpp | 52 +++++ 8 files changed, 642 insertions(+) create mode 100644 tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp create mode 100644 tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp create mode 100644 tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp create mode 100644 tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp create mode 100644 tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp create mode 100644 tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp create mode 100644 tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp create mode 100644 tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp diff --git a/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp new file mode 100644 index 00000000000..970de60179c --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp @@ -0,0 +1,181 @@ +#include + +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +TEST(lopatin_i_count_words_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector input = {}; + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_FALSE(testTaskParallel.validation()); + } +} + +TEST(lopatin_i_count_words_mpi, test_3_words) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "three funny words"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential 
testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_300_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(20); + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_1500_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(100); + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_6k_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(400); + std::vector 
wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp b/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp new file mode 100644 index 00000000000..f811a89d29c --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lopatin_i_count_words_mpi { + +std::vector generateLongString(int n); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int wordCount{}; + int spaceCount{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector localInput_; + int wordCount{}; + int spaceCount{}; + int localSpaceCount{}; + boost::mpi::communicator world; +}; + +} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp new file mode 100644 index 00000000000..cb9e8d2701d --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp @@ -0,0 +1,72 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +std::vector testData = lopatin_i_count_words_mpi::generateLongString(2000); + +TEST(lopatin_i_count_words_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector input = testData; + std::vector wordCount(1, 0); + + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + 
taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskData->outputs_count.emplace_back(wordCount.size()); + } + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordCount[0], 30000); + } +} + +TEST(lopatin_i_count_words_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector input = testData; + std::vector wordCount(1, 0); + + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskData->outputs_count.emplace_back(wordCount.size()); + } + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordCount[0], 30000); + } +} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp b/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp new file mode 100644 index 00000000000..2059bd79f3e --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp @@ -0,0 +1,100 @@ +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +namespace lopatin_i_count_words_mpi { + +std::vector generateLongString(int n) { + std::vector testData; + std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. 
"; + for (int i = 0; i < n; i++) { + for (unsigned long int j = 0; j < testString.length(); j++) { + testData.push_back(testString[j]); + } + } + return testData; +} + +bool TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + return true; +} + +bool TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool TestMPITaskSequential::run() { + internal_order_test(); + for (char c : input_) { + if (c == ' ') { + spaceCount++; + } + } + wordCount = spaceCount + 1; + return true; +} + +bool TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = wordCount; + return true; +} + +bool TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int chunkSize = 0; + if (world.rank() == 0) { + input_ = std ::vector(taskData->inputs_count[0]); + auto* tmpPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned long int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmpPtr[i]; + } + chunkSize = taskData->inputs_count[0] / world.size(); + } + boost::mpi::broadcast(world, chunkSize, 0); + + localInput_.resize(chunkSize); + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * chunkSize, chunkSize); + } + localInput_ = std::vector(input_.begin(), input_.begin() + chunkSize); + } else { + world.recv(0, 0, localInput_.data(), chunkSize); + } + return true; +} + +bool TestMPITaskParallel::validation() { + internal_order_test(); + return (world.rank() == 0) ? 
(taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1) : true; +} + +bool TestMPITaskParallel::run() { + internal_order_test(); + for (char c : localInput_) { + if (c == ' ') { + localSpaceCount++; + } + } + boost::mpi::reduce(world, localSpaceCount, spaceCount, std::plus<>(), 0); + if (world.rank() == 0) { + wordCount = spaceCount + 1; + } + return true; +} + +bool TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = wordCount; + } + return true; +} + +} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp new file mode 100644 index 00000000000..962d2dfd001 --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp @@ -0,0 +1,97 @@ +#include + +#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" + +TEST(lopatin_i_count_words_seq, test_empty_string) { + std::vector input = {}; + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), false); +} + +TEST(lopatin_i_count_words_seq, test_3_words) { + std::vector input; + std::string testString = "three funny words"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 3); +} + +TEST(lopatin_i_count_words_seq, test_300_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(20); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 300); +} + +TEST(lopatin_i_count_words_seq, test_1500_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(100); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + 
+ ASSERT_EQ(out[0], 1500); +} + +TEST(lopatin_i_count_words_seq, test_6k_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(400); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 6000); +} \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp b/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp new file mode 100644 index 00000000000..96510732c77 --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lopatin_i_count_words_seq { +std::vector generateLongString(int n); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int wordCount{}; + int spaceCount{}; +}; + +} // namespace lopatin_i_count_words_seq \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp new file mode 100644 index 00000000000..61b2e1fa450 --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp @@ -0,0 +1,66 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" + +std::vector testData = lopatin_i_count_words_seq::generateLongString(1000); + +TEST(word_count_seq, test_pipeline_run) { + std::vector input = testData; + std::vector word_count(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + taskData->outputs_count.emplace_back(word_count.size()); + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(word_count[0], 15000); +} + +TEST(word_count_seq, test_task_run) { + std::vector input = testData; + std::vector word_count(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + 
taskData->outputs_count.emplace_back(word_count.size());
+
+  auto testTask = std::make_shared<lopatin_i_count_words_seq::TestTaskSequential>(taskData);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTask);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(word_count[0], 15000);
+}
\ No newline at end of file
diff --git a/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp b/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp
new file mode 100644
index 00000000000..823c3cb8c24
--- /dev/null
+++ b/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp
@@ -0,0 +1,52 @@
+#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp"
+
+namespace lopatin_i_count_words_seq {
+
+std::vector<char> generateLongString(int n) {
+  std::vector<char> testData;
+  std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. ";
+  for (int i = 0; i < n - 1; i++) {
+    for (unsigned long int j = 0; j < testString.length(); j++) {
+      testData.push_back(testString[j]);
+    }
+  }
+  std::string lastSentence = "This is a long sentence for performance testing of the word count algorithm using MPI.";
+  for (unsigned long int j = 0; j < lastSentence.length(); j++) {
+    testData.push_back(lastSentence[j]);
+  }
+  return testData;
+}
+
+bool lopatin_i_count_words_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<char>(taskData->inputs_count[0]);
+  auto* tempPtr = reinterpret_cast<char*>(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tempPtr[i];
+  }
+  return true;
+}
+
+bool lopatin_i_count_words_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool lopatin_i_count_words_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (char c : input_) {
+    if (c == ' ') {
+      spaceCount++;
+    }
+  }
+  wordCount = spaceCount + 1;
+  return true;
+}
+
+bool lopatin_i_count_words_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = wordCount;
+  return true;
+}
+
+}  // namespace lopatin_i_count_words_seq
\ No newline at end of file
From 454e7493d65f12523942f25366b8520822bc3d5a Mon Sep 17 00:00:00 2001
From: MatveyKurakin <113084585+MatveyKurakin@users.noreply.github.com>
Date: Sun, 27 Oct 2024 18:00:22 +0300
Subject: [PATCH 014/155] Kurakin Matvey. Task 1. Variant 17. Finding the
 minimum values in the rows of a matrix. (#15)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential algorithm:
- Walk over every element of each row and determine the row's minimum (a
standalone sketch of this follows the description)

Description of the MPI algorithm:
- Process zero distributes the data among all processes: it splits the matrix,
represented as a flat vector, into parts, and the number of parts equals the
number of processes.
- In run(), each process determines the row boundaries on its own and finds
the row minima. If a row is split across several processes, each process finds
the minimum over its own segment, and the result is then gathered on process 0
with reduce.
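For illustration, a minimal standalone sketch of the row-minimum computation on
a flattened matrix (plain C++, no MPI; row_minima is an illustrative name, not
part of the task's API). It uses the 3x5 matrix from the Test_MPI_and_Seq_3_5
functional test:

#include <algorithm>
#include <iostream>
#include <vector>

std::vector<int> row_minima(const std::vector<int>& flat, int rows, int cols) {
  std::vector<int> mins(rows);
  for (int r = 0; r < rows; ++r) {
    // each row occupies the contiguous slice [r * cols, (r + 1) * cols) of the flat vector
    mins[r] = *std::min_element(flat.begin() + r * cols, flat.begin() + (r + 1) * cols);
  }
  return mins;
}

int main() {
  std::vector<int> flat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0};
  for (int m : row_minima(flat, 3, 5)) {
    std::cout << m << " ";  // prints: 1 3 0
  }
  std::cout << "\n";
  return 0;
}

Because the matrix is stored row-major in one flat vector, each row is a
contiguous slice, which is what the MPI variant relies on when it cuts the
vector into per-process chunks.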
---
 .../func_tests/main.cpp | 363 ++++++++++++++++++
 .../include/ops_mpi.hpp |  50 +++
 .../perf_tests/main.cpp | 104 +++++
 .../src/ops_mpi.cpp     | 150 ++++++++
 .../func_tests/main.cpp | 131 +++++++
 .../include/ops_seq.hpp |  26 ++
 .../perf_tests/main.cpp | 100 +++++
 .../src/ops_seq.cpp     |  46 +++
 8 files changed, 970 insertions(+)
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..ec80d2b3548
--- /dev/null
+++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
@@ -0,0 +1,363 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
+
+TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_5) {
+  int count_rows = 3;
+  int size_rows = 5;
+  boost::mpi::communicator world;
+  std::vector<int> global_mat;
+  std::vector<int> ans;
+  std::vector<int> par_min_vec(count_rows, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0};
+    ans = {1, 3, 0};
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data()));
+    taskDataPar->inputs_count.emplace_back(global_mat.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&size_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(par_min_vec.data()));
+    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
+  }
+
+  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> ref_min_vec(count_rows, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data()));
+    taskDataSeq->inputs_count.emplace_back(global_mat.size());
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows));
taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_6) { + int count_rows = 3; + int size_rows = 6; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; + ans = {3, 4, 2}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_4_5) { + int count_rows = 4; + int size_rows = 5; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; + ans = {3, 4, 2, 5}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + 
taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_12) { + int count_rows = 10; + int size_rows = 12; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + 
taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_15) { + int count_rows = 10; + int size_rows = 15; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_2) { + int count_rows = 10; + int size_rows = 2; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + 
taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_0_0) { + int count_rows = 0; + int size_rows = 0; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..68621f67642 --- /dev/null +++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,50 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kurakin_m_min_values_by_rows_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + std::vector input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool 
pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + std::vector input_, local_input_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace kurakin_m_min_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..a290fa90e1a --- /dev/null +++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,104 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_pipeline_run) { + int count_rows = 100; + int size_rows = 400; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = std::vector(count_rows * size_rows, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < par_min_vec.size(); i++) { + EXPECT_EQ(1, par_min_vec[0]); + } + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_task_run) { + int count_rows = 100; + int size_rows = 400; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = std::vector(count_rows * size_rows, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + 
ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < par_min_vec.size(); i++) { + EXPECT_EQ(1, par_min_vec[0]); + } + } +} diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..db05f44e74a --- /dev/null +++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp @@ -0,0 +1,150 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + count_rows = (int)*taskData->inputs[1]; + size_rows = (int)*taskData->inputs[2]; + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + res = std::vector(count_rows, 0); + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + return *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 && *taskData->inputs[1] == taskData->outputs_count[0]; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + for (int i = 0; i < count_rows; i++) { + res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + count_rows = 0; + size_rows = 0; + + unsigned int delta = 0; + + if (world.rank() == 0) { + count_rows = (int)*taskData->inputs[1]; + size_rows = (int)*taskData->inputs[2]; + if (taskData->inputs_count[0] % world.size() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } else { + delta = taskData->inputs_count[0] / world.size() + 1; + } + } + + broadcast(world, count_rows, 0); + broadcast(world, size_rows, 0); + broadcast(world, delta, 0); + + if (world.rank() == 0) { + input_ = std::vector(delta * world.size(), INT_MAX); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + } + + local_input_ = std::vector(delta); + 
boost::mpi::scatter(world, input_.data(), local_input_.data(), delta, 0); + + res = std::vector(count_rows, INT_MAX); + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 && *taskData->inputs[1] == taskData->outputs_count[0]; + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int last_delta = 0; + if (world.rank() == world.size() - 1) { + last_delta = local_input_.size() * world.size() - size_rows * count_rows; + } + + unsigned int ind = world.rank() * local_input_.size() / size_rows; + for (unsigned int i = 0; i < ind; ++i) { + reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); + } + + unsigned int delta = std::min(local_input_.size(), size_rows - world.rank() * local_input_.size() % size_rows); + int local_res; + + local_res = *std::min_element(local_input_.begin(), local_input_.begin() + delta); + reduce(world, local_res, res[ind], boost::mpi::minimum(), 0); + ++ind; + + unsigned int k = 0; + while (local_input_.begin() + delta + k * size_rows < local_input_.end() - last_delta) { + local_res = *std::min_element(local_input_.begin() + delta + k * size_rows, + std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * size_rows)); + reduce(world, local_res, res[ind], boost::mpi::minimum(), 0); + ++k; + ++ind; + } + + for (unsigned int i = ind; i < res.size(); ++i) { + reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); + } + + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..480eccc6ab7 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,131 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min1) { + int count_rows; + int size_rows; + + // Create data + count_rows = 3; + size_rows = 5; + std::vector global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {1, 3, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); +} + 
+TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min2) { + int count_rows; + int size_rows; + + // Create data + count_rows = 3; + size_rows = 6; + std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {3, 4, 2}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); +} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min3) { + int count_rows; + int size_rows; + + // Create data + count_rows = 4; + size_rows = 5; + + std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {3, 4, 2, 5}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); +} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min_null) { + int count_rows; + int size_rows; + // Create data + count_rows = 0; + size_rows = 0; + std::vector global_mat(count_rows * size_rows); + std::vector seq_min_vec(count_rows, 0); + std::vector ans(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + 
testTaskSequential.post_processing(); + + ASSERT_EQ(seq_min_vec, ans); +} \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..6c4d04360c4 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kurakin_m_min_values_by_rows_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + std::vector input_; + std::vector res; +}; + +} // namespace kurakin_m_min_values_by_rows_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b07bae29d92 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_seq, test_pipeline_run) { + int count_rows; + int size_rows; + + // Create data + count_rows = 100; + size_rows = 400; + std::vector global_mat(count_rows * size_rows, 1); + std::vector seq_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (size_t i = 0; i < seq_min_vec.size(); i++) { + EXPECT_EQ(1, seq_min_vec[0]); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, test_task_run) { + int count_rows; + int size_rows; + + // Create data + count_rows = 100; + size_rows = 400; + std::vector global_mat(count_rows * size_rows, 1); + std::vector seq_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < seq_min_vec.size(); i++) { + EXPECT_EQ(1, seq_min_vec[0]); + } +} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..9219594818e --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,46 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" + +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + count_rows = (int)*taskData->inputs[1]; + size_rows = (int)*taskData->inputs[2]; + res = std::vector(count_rows, 0); + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return *taskData->inputs[1] == taskData->outputs_count[0]; +} + +bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} From 00c27692224cdb75a3a3e24a239c3e7028f9d374 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:57:52 +0300 Subject: [PATCH 015/155] =?UTF-8?q?=D0=A2=D0=B8=D1=82=D0=BE=D0=B2=20=D0=A1?= =?UTF-8?q?=D0=B5=D0=BC=D1=91=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87?= =?UTF-8?q?=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=201.?= =?UTF-8?q?=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D=D0=BB=D0=B5=D0=BC?= 
=?UTF-8?q?=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE?=
 =?UTF-8?q?=D1=80=D0=B0.=20(#14)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Description of the sequential task:**
The elements are summed with std::accumulate (iterating over the vector while accumulating the sum).

**Description of the parallel task:**
The vector is split into equal parts among the processes, which compute their partial sums concurrently. Once the local computations finish, the processes return their results to the main process, where they are combined with the reduce operation. Inside each process the summation is likewise done with std::accumulate.
---
 .../titov_s_vector_sum/func_tests/main.cpp   | 241 ++++++++++++++++++
 .../titov_s_vector_sum/include/ops_mpi.hpp   |  49 ++++
 .../titov_s_vector_sum/perf_tests/main.cpp   |  88 +++++++
 tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp | 116 +++++++++
 .../titov_s_vector_sum/func_tests/main.cpp   | 135 ++++++++++
 .../titov_s_vector_sum/include/ops_seq.hpp   |  26 ++
 .../titov_s_vector_sum/perf_tests/main.cpp   |  81 ++++++
 tasks/seq/titov_s_vector_sum/src/ops_seq.cpp |  45 ++++
 8 files changed, 781 insertions(+)
 create mode 100644 tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
 create mode 100644 tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp
 create mode 100644 tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp
 create mode 100644 tasks/seq/titov_s_vector_sum/func_tests/main.cpp
 create mode 100644 tasks/seq/titov_s_vector_sum/include/ops_seq.hpp
 create mode 100644 tasks/seq/titov_s_vector_sum/perf_tests/main.cpp
 create mode 100644 tasks/seq/titov_s_vector_sum/src/ops_seq.cpp

diff --git a/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
new file mode 100644
index 00000000000..c4fe34b01ba
--- /dev/null
+++ b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
@@ -0,0 +1,241 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp"
+
+TEST(titov_s_vector_sum_mpi, Test_Sum_100) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_sum(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 100;
+    global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar);
+  ASSERT_TRUE(MPIVectorSumParallel.validation());
+  MPIVectorSumParallel.pre_processing();
+  MPIVectorSumParallel.run();
+  MPIVectorSumParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_sum(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+    // Create Task
+    titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq);
ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_EmptyArray) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_sum[0], 0); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_1000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_100000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + 
MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_0) { + boost::mpi::communicator world; + std::vector global_vec(1, 0); + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + 
titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp new file mode 100644 index 00000000000..3319bd016ec --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_mpi { + +std::vector getRandomVector(int sz); + +class MPIVectorSumSequential : public ppc::core::Task { + public: + explicit MPIVectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops; +}; + +class MPIVectorSumParallel : public ppc::core::Task { + public: + explicit MPIVectorSumParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace titov_s_vector_sum_mpi diff --git a/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..c565240f793 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,88 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +TEST(titov_s_vector_sum_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector 
global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp new file mode 100644 index 00000000000..5f5b744c068 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp @@ -0,0 +1,116 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector titov_s_vector_sum_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::run() { + internal_order_test(); + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + unsigned int remainder = 0; + + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % world.size(); + } + + broadcast(world, delta, 0); + broadcast(world, remainder, 0); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + + for (int proc = 1; proc < world.size(); proc++) { + unsigned int send_size = (proc == world.size() - 
1) ? delta + remainder : delta; + world.send(proc, 0, input_.data() + proc * delta, send_size); + } + } + local_input_ = std::vector((world.rank() == world.size() - 1) ? delta + remainder : delta); + + if (world.rank() != 0) { + unsigned int recv_size = (world.rank() == world.size() - 1) ? delta + remainder : delta; + world.recv(0, 0, local_input_.data(), recv_size); + } else { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } + + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::run() { + internal_order_test(); + int local_res; + local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); + reduce(world, local_res, res, std::plus(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/titov_s_vector_sum/func_tests/main.cpp b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp new file mode 100644 index 00000000000..3c60cb5d6f5 --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp @@ -0,0 +1,135 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, Test_Int) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} + +TEST(titov_s_vector_sum_seq, Test_Double) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], expected_sum, 1e-6); +} + +TEST(titov_s_vector_sum_seq, Test_Float) { + // Create data + std::vector in(1, 1.f); + std::vector out(1, 0.f); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential 
vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], static_cast(in.size()), 1e-3f); +} + +TEST(titov_s_vector_sum_seq, Test_Int64_t) { + // Create data + std::vector in(75836, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Uint8_t) { + // Create data + std::vector in(255, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Empty_Array) { + // Create data + std::vector in(1, 0); + const int expected_sum = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} diff --git a/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp new file mode 100644 index 00000000000..d29d94269bd --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_seq { +template +class VectorSumSequential : public ppc::core::Task { + public: + explicit VectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + InOutType res; +}; + +} // namespace titov_s_vector_sum_seq diff --git a/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..3b772d11c2b --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,81 @@ +// 
Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto vectorSumSequential = std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(titov_s_vector_sum_seq, test_task_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto vectorSumSequential = std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp new file mode 100644 index 00000000000..98ca0cac0b2 --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp @@ -0,0 +1,45 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +template +bool titov_s_vector_sum_seq::VectorSumSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +template +bool titov_s_vector_sum_seq::VectorSumSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && 
taskData->outputs_count[0] == 1;
+}
+
+template
+bool titov_s_vector_sum_seq::VectorSumSequential::run() {
+  internal_order_test();
+  res = std::accumulate(input_.begin(), input_.end(), 0);
+  return true;
+}
+
+template
+bool titov_s_vector_sum_seq::VectorSumSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast(taskData->outputs[0])[0] = res;
+  return true;
+}
+template class titov_s_vector_sum_seq::VectorSumSequential;
+template class titov_s_vector_sum_seq::VectorSumSequential;
+template class titov_s_vector_sum_seq::VectorSumSequential;
+template class titov_s_vector_sum_seq::VectorSumSequential;
+template class titov_s_vector_sum_seq::VectorSumSequential;

From 0a3720e3ceb2cd8366ae26758ba761a3081dc7f1 Mon Sep 17 00:00:00 2001
From: MargaritaMuhina <130301530+MargaritaMuhina@users.noreply.github.com>
Date: Mon, 28 Oct 2024 17:13:53 +0300
Subject: [PATCH 016/155] =?UTF-8?q?=D0=9C=D1=83=D1=85=D0=B8=D0=BD=D0=B0=20?=
 =?UTF-8?q?=D0=9C=D0=B0=D1=80=D0=B3=D0=B0=D1=80=D0=B8=D1=82=D0=B0.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=204.=20=20=D0=9C=D0=B8=D0=BD=D0=B8?=
 =?UTF-8?q?=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5?=
 =?UTF-8?q?=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80?=
 =?UTF-8?q?=D0=B0.=20(#37)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task: each element of the vector is compared
against a variable that holds the current minimum of the vector; if the
element is smaller than the stored value, the element is written into that
variable.

Description of the MPI task:
- The vector is divided into fragments (one per process).
- The process with rank 0 distributes the fragments among the processes.
- Each process receives its own fragment (local_input_).
- Each process finds the minimum value in its fragment.
- The reduce function with the minimum operator collects the minimum value
  from all processes.
- The result is written into the res variable on the process with rank 0.
- The process with rank 0 writes the found minimum value into the output
  buffer.
A minimal sketch of this pattern is shown below.
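For reference, here is a minimal self-contained sketch of the
distribute/local-minimum/reduce pattern described above. It is not part of
the patch: it uses boost::mpi::scatter for brevity (the patch itself posts
manual send/recv pairs), and it assumes the input length is an exact
multiple of the number of processes.

// Hypothetical standalone example, not taken from the patch; the chunk size
// and test values are illustrative only.
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/operations.hpp>

#include <algorithm>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int chunk = 4;  // elements per process; even divisibility is assumed
  std::vector<int> data;
  if (world.rank() == 0) {
    data.resize(chunk * world.size());
    for (std::size_t i = 0; i < data.size(); ++i) {
      data[i] = 100 - static_cast<int>(i);  // arbitrary test values
    }
  }

  // Rank 0 distributes the fragments; every process receives its own chunk
  // (the role played by local_input_ in the patch).
  std::vector<int> local(chunk);
  boost::mpi::scatter(world, data.data(), local.data(), chunk, 0);

  // Local minimum, then a reduce with the minimum operator onto rank 0.
  int local_min = *std::min_element(local.begin(), local.end());
  int global_min = 0;
  boost::mpi::reduce(world, local_min, global_min, boost::mpi::minimum<int>(), 0);

  // As in the patch, the result is only meaningful on rank 0 after the reduce.
  if (world.rank() == 0) {
    std::cout << "min = " << global_min << std::endl;
  }
  return 0;
}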
--- .../func_tests/main.cpp | 203 ++++++++++++++++++ .../include/ops_mpi.hpp | 46 ++++ .../perf_tests/main.cpp | 105 +++++++++ .../src/ops_mpi.cpp | 112 ++++++++++ .../func_tests/main.cpp | 142 ++++++++++++ .../include/ops_seq.hpp | 24 +++ .../perf_tests/main.cpp | 99 +++++++++ .../src/ops_seq.cpp | 53 +++++ 8 files changed, 784 insertions(+) create mode 100644 tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..796d40091ec --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,203 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements, Test_Min) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_LargeVector) { + boost::mpi::communicator world; + std::vector 
global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_NegativeValues) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + const int min_val = -100; + const int max_val = -10; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_RepeatingValues) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector 
global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10}; + global_vec.resize(count_size_vector, 10); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..8772dc545ef --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace muhina_m_min_of_vector_elements_mpi { +int vectorMin(std::vector> v); + +class MinOfVectorMPISequential : public ppc::core::Task { + public: + explicit MinOfVectorMPISequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +class MinOfVectorMPIParallel : public ppc::core::Task { + public: + explicit MinOfVectorMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world_; +}; + +} // namespace muhina_m_min_of_vector_elements_mpi diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..53d1f87927a --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,105 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); 
+ for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + global_vec[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MinOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MinOfVectorMPIParallel->validation(), true); + MinOfVectorMPIParallel->pre_processing(); + MinOfVectorMPIParallel->run(); + MinOfVectorMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(-100, global_res[0]); + } +} + +TEST(muhina_m_min_of_vector_elements_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + global_vec[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MinOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MinOfVectorMPIParallel->validation(), true); + MinOfVectorMPIParallel->pre_processing(); + MinOfVectorMPIParallel->run(); + MinOfVectorMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(MinOfVectorMPIParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(-100, global_res[0]); + } +} diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..78112ee7361 --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,112 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace 
std::chrono_literals; + +int muhina_m_min_of_vector_elements_mpi::vectorMin(std::vector> vect) { + int mini = vect[0]; + + for (size_t i = 1; i < vect.size(); i++) { + if (vect[i] < mini) { + mini = vect[i]; + } + } + return mini; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::run() { + internal_order_test(); + if (input_.empty()) { + // Handle the case when the input vector is empty + return true; + } + res_ = muhina_m_min_of_vector_elements_mpi::vectorMin(input_); + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world_.rank() == 0) { + delta = taskData->inputs_count[0] / world_.size(); + } + broadcast(world_, delta, 0); + + if (world_.rank() == 0) { + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + for (int proc = 1; proc < world_.size(); proc++) { + world_.send(proc, 0, input_.data() + proc * delta, delta); + } + } + local_input_ = std::vector(delta); + if (world_.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world_.recv(0, 0, local_input_.data(), delta); + } + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::validation() { + internal_order_test(); + if (world_.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::run() { + internal_order_test(); + if (local_input_.empty()) { + // Handle the case when the local input vector is empty + return true; + } + int local_min = muhina_m_min_of_vector_elements_mpi::vectorMin(local_input_); + + reduce(world_, local_min, res_, boost::mpi::minimum(), 0); + return true; +} + +bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::post_processing() { + internal_order_test(); + if (world_.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp b/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..97a02a08b37 --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,142 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % 
(max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_10) { + const int count = 10; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_20) { + const int count = 20; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_50) { + const int count = 50; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_70) { + const int count = 70; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + 
ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_100) { + const int count = 100; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp b/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..815628780cf --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace muhina_m_min_of_vector_elements_seq { +int vectorMin(std::vector> v); + +class MinOfVectorSequential : public ppc::core::Task { + public: + explicit MinOfVectorSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; +} // namespace muhina_m_min_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp b/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..5d72650790b --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,99 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements, test_pipeline_run) { + const int count = 2000000; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MinOfVectorSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return 
static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements, test_task_run) { + const int count = 10000000; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MinOfVectorSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp b/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..a8f831431ee --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,53 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +int muhina_m_min_of_vector_elements_seq::vectorMin(std::vector> vect) { + int mini = vect[0]; + + for (size_t i = 1; i < vect.size(); i++) { + if (vect[i] < mini) { + mini = vect[i]; + } + } + return mini; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::pre_processing() { + internal_order_test(); + + // Init data vector + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + + return true; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::validation() { + internal_order_test(); + // Handle empty input vector + if (taskData->inputs_count[0] == 0) { + return taskData->outputs_count[0] == 0; + } + return taskData->outputs_count[0] == 1; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::run() { + internal_order_test(); + // Iterate through the vector + res_ = muhina_m_min_of_vector_elements_seq::vectorMin(input_); + return true; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From a67a98f394539618737da42b16378252723942da Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Mon, 28 Oct 2024 15:38:46 +0100 Subject: [PATCH 017/155] 
=?UTF-8?q?Revert=20"=D0=A2=D0=B8=D1=82=D0=BE?=
 =?UTF-8?q?=D0=B2=20=D0=A1=D0=B5=D0=BC=D1=91=D0=BD.=20=D0=97=D0=B0=D0=B4?=
 =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?=
 =?UTF-8?q?=D1=82=201.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D=D0=BB?=
 =?UTF-8?q?=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA?=
 =?UTF-8?q?=D1=82=D0=BE=D1=80=D0=B0."=20(#57)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#14

@Semyon1104 master failed after your PR
https://github.com/learning-process/ppc-2024-autumn/actions/runs/11554431985/attempts/1
https://github.com/learning-process/ppc-2024-autumn/actions/runs/11555830070
---
 .../titov_s_vector_sum/func_tests/main.cpp    | 241 ------------------
 .../titov_s_vector_sum/include/ops_mpi.hpp    |  49 ----
 .../titov_s_vector_sum/perf_tests/main.cpp    |  88 -------
 tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp  | 116 ---------
 .../titov_s_vector_sum/func_tests/main.cpp    | 135 ----------
 .../titov_s_vector_sum/include/ops_seq.hpp    |  26 --
 .../titov_s_vector_sum/perf_tests/main.cpp    |  81 ------
 tasks/seq/titov_s_vector_sum/src/ops_seq.cpp  |  45 ----
 8 files changed, 781 deletions(-)
 delete mode 100644 tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
 delete mode 100644 tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/titov_s_vector_sum/func_tests/main.cpp
 delete mode 100644 tasks/seq/titov_s_vector_sum/include/ops_seq.hpp
 delete mode 100644 tasks/seq/titov_s_vector_sum/perf_tests/main.cpp
 delete mode 100644 tasks/seq/titov_s_vector_sum/src/ops_seq.cpp
diff --git a/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
deleted file mode 100644
index c4fe34b01ba..00000000000
--- a/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include
-
-#include
-#include
-#include
-
-#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp"
-
-TEST(titov_s_vector_sum_mpi, Test_Sum_100) {
-  boost::mpi::communicator world;
-  std::vector global_vec;
-  std::vector global_sum(1, 0);
-  // Create TaskData
-  std::shared_ptr taskDataPar = std::make_shared();
-
-  if (world.rank() == 0) {
-    const int count_size_vector = 100;
-    global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector);
-    taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data()));
-    taskDataPar->inputs_count.emplace_back(global_vec.size());
-    taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data()));
-    taskDataPar->outputs_count.emplace_back(global_sum.size());
-  }
-
-  titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar);
-  ASSERT_TRUE(MPIVectorSumParallel.validation());
-  MPIVectorSumParallel.pre_processing();
-  MPIVectorSumParallel.run();
-  MPIVectorSumParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector reference_sum(1, 0);
-
-    // Create TaskData
-    std::shared_ptr taskDataSeq = std::make_shared();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data()));
-    taskDataSeq->inputs_count.emplace_back(global_vec.size());
-    taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data()));
-    taskDataSeq->outputs_count.emplace_back(reference_sum.size());
-
-    // Create Task
-    titov_s_vector_sum_mpi::MPIVectorSumSequential
MPIVectorSumSequential(taskDataSeq); - ASSERT_TRUE(MPIVectorSumSequential.validation()); - MPIVectorSumSequential.pre_processing(); - MPIVectorSumSequential.run(); - MPIVectorSumSequential.post_processing(); - - ASSERT_EQ(reference_sum[0], global_sum[0]); - } -} - -TEST(titov_s_vector_sum_mpi, Test_Sum_EmptyArray) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); - ASSERT_TRUE(MPIVectorSumParallel.validation()); - MPIVectorSumParallel.pre_processing(); - MPIVectorSumParallel.run(); - MPIVectorSumParallel.post_processing(); - - if (world.rank() == 0) { - ASSERT_EQ(global_sum[0], 0); - } -} - -TEST(titov_s_vector_sum_mpi, Test_Sum_1000) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 1000; - global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); - ASSERT_TRUE(MPIVectorSumParallel.validation()); - MPIVectorSumParallel.pre_processing(); - MPIVectorSumParallel.run(); - MPIVectorSumParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); - ASSERT_TRUE(MPIVectorSumSequential.validation()); - MPIVectorSumSequential.pre_processing(); - MPIVectorSumSequential.run(); - MPIVectorSumSequential.post_processing(); - - ASSERT_EQ(reference_sum[0], global_sum[0]); - } -} - -TEST(titov_s_vector_sum_mpi, Test_Sum_100000) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 100000; - global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); - ASSERT_TRUE(MPIVectorSumParallel.validation()); - MPIVectorSumParallel.pre_processing(); - 
MPIVectorSumParallel.run(); - MPIVectorSumParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); - ASSERT_TRUE(MPIVectorSumSequential.validation()); - MPIVectorSumSequential.pre_processing(); - MPIVectorSumSequential.run(); - MPIVectorSumSequential.post_processing(); - - ASSERT_EQ(reference_sum[0], global_sum[0]); - } -} - -TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_1) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 1; - global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); - ASSERT_TRUE(MPIVectorSumParallel.validation()); - MPIVectorSumParallel.pre_processing(); - MPIVectorSumParallel.run(); - MPIVectorSumParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_sum(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); - ASSERT_TRUE(MPIVectorSumSequential.validation()); - MPIVectorSumSequential.pre_processing(); - MPIVectorSumSequential.run(); - MPIVectorSumSequential.post_processing(); - - ASSERT_EQ(reference_sum[0], global_sum[0]); - } -} - -TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_0) { - boost::mpi::communicator world; - std::vector global_vec(1, 0); - std::vector global_sum(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); - ASSERT_TRUE(MPIVectorSumParallel.validation()); - MPIVectorSumParallel.pre_processing(); - MPIVectorSumParallel.run(); - MPIVectorSumParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_sum(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - 
titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); - ASSERT_TRUE(MPIVectorSumSequential.validation()); - MPIVectorSumSequential.pre_processing(); - MPIVectorSumSequential.run(); - MPIVectorSumSequential.post_processing(); - - ASSERT_EQ(reference_sum[0], global_sum[0]); - } -} diff --git a/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp deleted file mode 100644 index 3319bd016ec..00000000000 --- a/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace titov_s_vector_sum_mpi { - -std::vector getRandomVector(int sz); - -class MPIVectorSumSequential : public ppc::core::Task { - public: - explicit MPIVectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; - std::string ops; -}; - -class MPIVectorSumParallel : public ppc::core::Task { - public: - explicit MPIVectorSumParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res{}; - std::string ops; - boost::mpi::communicator world; -}; - -} // namespace titov_s_vector_sum_mpi diff --git a/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp deleted file mode 100644 index c565240f793..00000000000 --- a/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" - -TEST(titov_s_vector_sum_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 100000000; - global_vec = std::vector(count_size_vector, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto MPIVectorSumParallel = std::make_shared(taskDataPar); - ASSERT_EQ(MPIVectorSumParallel->validation(), true); - MPIVectorSumParallel->pre_processing(); - MPIVectorSumParallel->run(); - MPIVectorSumParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(count_size_vector, global_sum[0]); - } -} - -TEST(titov_s_vector_sum_mpi, test_task_run) { - boost::mpi::communicator world; - 
std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 100000000; - global_vec = std::vector(count_size_vector, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto MPIVectorSumParallel = std::make_shared(taskDataPar); - ASSERT_EQ(MPIVectorSumParallel->validation(), true); - MPIVectorSumParallel->pre_processing(); - MPIVectorSumParallel->run(); - MPIVectorSumParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(count_size_vector, global_sum[0]); - } -} diff --git a/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp deleted file mode 100644 index 5f5b744c068..00000000000 --- a/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -std::vector titov_s_vector_sum_mpi::getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumSequential::pre_processing() { - internal_order_test(); - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - // Init value for output - res = 0; - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumSequential::validation() { - internal_order_test(); - // Check count elements of output - return taskData->outputs_count[0] == 1; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumSequential::run() { - internal_order_test(); - res = std::accumulate(input_.begin(), input_.end(), 0); - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumParallel::pre_processing() { - internal_order_test(); - unsigned int delta = 0; - unsigned int remainder = 0; - - if (world.rank() == 0) { - delta = taskData->inputs_count[0] / world.size(); - remainder = taskData->inputs_count[0] % world.size(); - } - - broadcast(world, delta, 0); - broadcast(world, remainder, 0); - - if (world.rank() == 0) { - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - - for (int proc = 1; proc < world.size(); proc++) { - unsigned int send_size = (proc 
== world.size() - 1) ? delta + remainder : delta; - world.send(proc, 0, input_.data() + proc * delta, send_size); - } - } - local_input_ = std::vector((world.rank() == world.size() - 1) ? delta + remainder : delta); - - if (world.rank() != 0) { - unsigned int recv_size = (world.rank() == world.size() - 1) ? delta + remainder : delta; - world.recv(0, 0, local_input_.data(), recv_size); - } else { - local_input_ = std::vector(input_.begin(), input_.begin() + delta); - } - - res = 0; - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of output - return taskData->outputs_count[0] == 1; - } - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumParallel::run() { - internal_order_test(); - int local_res; - local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); - reduce(world, local_res, res, std::plus(), 0); - return true; -} - -bool titov_s_vector_sum_mpi::MPIVectorSumParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res; - } - return true; -} diff --git a/tasks/seq/titov_s_vector_sum/func_tests/main.cpp b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp deleted file mode 100644 index 3c60cb5d6f5..00000000000 --- a/tasks/seq/titov_s_vector_sum/func_tests/main.cpp +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/titov_s_vector_sum/include/ops_seq.hpp" - -TEST(titov_s_vector_sum_seq, Test_Int) { - // Create data - std::vector in(1, 10); - const int expected_sum = 10; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - ASSERT_EQ(expected_sum, out[0]); -} - -TEST(titov_s_vector_sum_seq, Test_Double) { - // Create data - std::vector in(1, 10); - const int expected_sum = 10; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - EXPECT_NEAR(out[0], expected_sum, 1e-6); -} - -TEST(titov_s_vector_sum_seq, Test_Float) { - // Create data - std::vector in(1, 1.f); - std::vector out(1, 0.f); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - 
titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - EXPECT_NEAR(out[0], static_cast(in.size()), 1e-3f); -} - -TEST(titov_s_vector_sum_seq, Test_Int64_t) { - // Create data - std::vector in(75836, 1); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - ASSERT_EQ(static_cast(out[0]), in.size()); -} - -TEST(titov_s_vector_sum_seq, Test_Uint8_t) { - // Create data - std::vector in(255, 1); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - ASSERT_EQ(static_cast(out[0]), in.size()); -} - -TEST(titov_s_vector_sum_seq, Test_Empty_Array) { - // Create data - std::vector in(1, 0); - const int expected_sum = 0; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); - ASSERT_TRUE(vectorSumSequential.validation()); - vectorSumSequential.pre_processing(); - vectorSumSequential.run(); - vectorSumSequential.post_processing(); - ASSERT_EQ(expected_sum, out[0]); -} diff --git a/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp deleted file mode 100644 index d29d94269bd..00000000000 --- a/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace titov_s_vector_sum_seq { -template -class VectorSumSequential : public ppc::core::Task { - public: - explicit VectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - InOutType res; -}; - -} // namespace titov_s_vector_sum_seq diff --git a/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp deleted file mode 100644 index 3b772d11c2b..00000000000 --- 
a/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/titov_s_vector_sum/include/ops_seq.hpp" - -TEST(titov_s_vector_sum_seq, test_pipeline_run) { - const int count = 10000000; - - // Create data - std::vector in(count, 0); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto vectorSumSequential = std::make_shared>(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(vectorSumSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, out[0]); -} - -TEST(titov_s_vector_sum_seq, test_task_run) { - const int count = 10000000; - - // Create data - std::vector in(count, 0); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto vectorSumSequential = std::make_shared>(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(vectorSumSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, out[0]); -} diff --git a/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp deleted file mode 100644 index 98ca0cac0b2..00000000000 --- a/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/titov_s_vector_sum/include/ops_seq.hpp" - -#include - -using namespace std::chrono_literals; - -template -bool titov_s_vector_sum_seq::VectorSumSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - // Init value for output - res = 0; - return true; -} - -template -bool titov_s_vector_sum_seq::VectorSumSequential::validation() { - 
internal_order_test();
-  // Check count elements of output
-  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
-}
-
-template
-bool titov_s_vector_sum_seq::VectorSumSequential::run() {
-  internal_order_test();
-  res = std::accumulate(input_.begin(), input_.end(), 0);
-  return true;
-}
-
-template
-bool titov_s_vector_sum_seq::VectorSumSequential::post_processing() {
-  internal_order_test();
-  reinterpret_cast(taskData->outputs[0])[0] = res;
-  return true;
-}
-template class titov_s_vector_sum_seq::VectorSumSequential;
-template class titov_s_vector_sum_seq::VectorSumSequential;
-template class titov_s_vector_sum_seq::VectorSumSequential;
-template class titov_s_vector_sum_seq::VectorSumSequential;
-template class titov_s_vector_sum_seq::VectorSumSequential;

From 15d8735fb1faedb2a80a86796184a1f91cef7506 Mon Sep 17 00:00:00 2001
From: KolodkinGrigorii <113025092+KolodkinGrigorii@users.noreply.github.com>
Date: Tue, 29 Oct 2024 03:01:09 +0300
Subject: [PATCH 018/155] Kolodkin Grigorii. Task 1. Variant 25. Counting the
 number of sentences in a string. (#19)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

How the algorithm works.

Sequential version: walk the string and look for the punctuation marks that
end a sentence: '.', '?', '!', '?!', '!?', '...'.

MPI algorithm: the root process splits the string into parts and distributes
them among the other processes. Each process counts the sentences in its own
part of the string, after which the partial counts are sent back to the root
process and summed.
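For illustration, a minimal self-contained sketch of the counting rule
described above (assumptions: plain ASCII input; `count_sentences` is a
hypothetical helper introduced for this message, not a function from the
patch):

``` cpp
#include <cstddef>
#include <string>

// A sentence ends at the last character of a run of '.', '!' and '?', so
// "?!", "!?" and "..." are each counted once. The bound i + 1 == text.size()
// is checked before text[i + 1] is read, so the scan never leaves the buffer.
std::size_t count_sentences(const std::string& text) {
  auto is_end = [](char c) { return c == '.' || c == '!' || c == '?'; };
  std::size_t count = 0;
  for (std::size_t i = 0; i < text.size(); i++) {
    if (is_end(text[i]) && (i + 1 == text.size() || !is_end(text[i + 1]))) {
      count++;
    }
  }
  return count;
}
```

On "Hello!My name is Grisha!" this returns 2, matching the functional tests
below.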
--- .../func_tests/main.cpp | 306 ++++++++++++++++++ .../include/ops_mpi.hpp | 46 +++ .../perf_tests/main.cpp | 95 ++++++ .../kolodkin_g_sentence_count/src/ops_mpi.cpp | 101 ++++++ .../func_tests/main.cpp | 115 +++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 79 +++++ .../kolodkin_g_sentence_count/src/ops_seq.cpp | 35 ++ 8 files changed, 801 insertions(+) create mode 100644 tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp create mode 100644 tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp create mode 100644 tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp create mode 100644 tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp create mode 100644 tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp create mode 100644 tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp create mode 100644 tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp create mode 100644 tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp diff --git a/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp b/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp new file mode 100644 index 00000000000..c226110bfa7 --- /dev/null +++ b/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp @@ -0,0 +1,306 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" + +TEST(Parallel_Operations_MPI, Test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 0); + } +} + +TEST(Parallel_Operations_MPI, Test_two_sentences) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + std::string str = "Hello!My name is Grisha!"; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + 
taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 2); + } +} + +TEST(Parallel_Operations_MPI, Test_sentences_with_special_symbols) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + std::string str = "Hello! My name is Grisha! I have cat,dog,parrot."; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 3); + } +} + +TEST(Parallel_Operations_MPI, Test_sentences_with_special_symbols_in_end_of_sentence) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + std::string str = "Hello! My name is Grisha! I have cat, dog, parrot. What is your name? How are you? 
Well..."; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 6); + } +} +TEST(Parallel_Operations_MPI, Test_sentences_with_double_symbols) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + std::string str = + "Hello!! My name is Grisha!! I have two pets: cat,dog,parrot. What is your name?! How are you!? 
Well..."; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 6); + } +} +TEST(Parallel_Operations_MPI, Big_text) { + boost::mpi::communicator world; + std::vector global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + std::string str = + "Otche nash, ize esi na nebeseh! Da svytitsa imya tvoe, da priidet tsarstvo tvoe! Da budet volya tvoya, ako na " + "nebeseh i na zemle. Hleb nas nasyshnii dazd nam dnes, i ostavi nam dolgi nasha. Yakozhe i my ostavlyaem " + "dolznikom nashim! I ne vvedi nas vo iskushenie, no izbavi nas ot lukavogo... 
Amin!"; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + ASSERT_EQ(reference_out[0], 7); + } +} \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp b/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp new file mode 100644 index 00000000000..b4599adbf35 --- /dev/null +++ b/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kolodkin_g_sentence_count_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + int localSentenceCount{}; + boost::mpi::communicator world; +}; + +} // namespace kolodkin_g_sentence_count_mpi \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp b/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp new file mode 100644 index 00000000000..b37ea023149 --- /dev/null +++ b/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp @@ -0,0 +1,95 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" + +TEST(mpi_kolodkin_g_sentence_count_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::string str = + 
"verifwriefnifnil!?vfnjklererjerjkerg...vrhklererffwjklfwefwejo!vefnklvevef?wfnkrkflwewefkl!vfnklvfklevf?" + "vrrnervevrnvreiev!"; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(7, global_sum[0]); + } +} + +TEST(mpi_kolodkin_g_sentence_count_test, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::string str = + "Na krayu dorogi stoyal dub! Eto byl ogromnuy, v dva obhvata dub. Knyaz Andrey podosel k dubu! Boze prabiy! " + "Kak " + "tebya zovut? Ya dub! A ya knyaz Andrey! Zdorovo! Poka-poka, dub! Poka, Andrey!"; + for (unsigned long int i = 0; i < str.length(); i++) { + global_str.push_back(str[i]); + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10, global_sum[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp b/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp new file mode 100644 index 00000000000..3bebc56e8a6 --- /dev/null +++ b/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp @@ -0,0 +1,101 @@ +#include "mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = 
reinterpret_cast(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+  res = 0;
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->outputs_count[0] == 1;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (unsigned long i = 0; i < input_.size(); i++) {
+    // bounds check first: input_[i + 1] is only read when it exists
+    if ((input_[i] == '.' || input_[i] == '!' || input_[i] == '?') &&
+        (i + 1 == input_.size() || (input_[i + 1] != '.' && input_[i + 1] != '!' && input_[i + 1] != '?'))) {
+      res++;
+    }
+  }
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  unsigned int delta = 0;
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[0] / world.size();
+  }
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    input_ = std::vector(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]);
+    for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+  }
+  local_input_.resize(delta);
+  if (world.rank() == 0) {
+    for (int proc = 1; proc < world.size(); proc++) {
+      world.send(proc, 0, input_.data() + proc * delta, delta);
+    }
+    local_input_ = std::vector(input_.begin(), input_.begin() + delta);
+  } else {
+    world.recv(0, 0, local_input_.data(), delta);
+  }
+  localSentenceCount = 0;
+  res = 0;
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  for (unsigned long i = 0; i < local_input_.size(); i++) {
+    // bounds check first: local_input_[i + 1] is only read when it exists
+    if ((local_input_[i] == '.' || local_input_[i] == '!' || local_input_[i] == '?') &&
+        (i + 1 == local_input_.size() ||
+         (local_input_[i + 1] != '.' && local_input_[i + 1] != '!' && local_input_[i + 1] != '?'))) {
+      localSentenceCount++;
+    }
+  }
+  reduce(world, localSentenceCount, res, std::plus<>(), 0);
+  return true;
+}
+
+bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast(taskData->outputs[0])[0] = res;
+  }
+  return true;
+}
diff --git a/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp b/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp
new file mode 100644
index 00000000000..37a6bd9f7a8
--- /dev/null
+++ b/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp
@@ -0,0 +1,115 @@
+// Copyright 2023 Nesterov Alexander
+#include
+
+#include
+
+#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp"
+
+TEST(Sequential, Test_two_sentences) {
+  // Create data
+  std::string str = "Hello!
My name is Grisha!"; + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 2); +} +TEST(Sequential, Test_sentences_with_special_symbols) { + // Create data + std::string str = "Hello!My name is Grisha! I have two pets: cat,dog,parrot."; + std::vector out(1, 0); + std::vector in(1, str); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 3); +} +TEST(Sequential, Test_sentences_with_special_symbols_in_end_of_sentence) { + // Create data + std::string str = + "Hello!My name is Grisha! I have two pets: cat,dog,parrot. What is your name?! How are you!? Well..."; + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 6); +} +TEST(Sequential, Test_sentences_with_double_symbols) { + // Create data + std::string str = + "Hello!! My name is Grisha!! I have two pets: cat,dog,parrot. What is your name?! How are you!? Well..."; + std::vector out(1, 0); + std::vector in(1, str); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 6); +} +TEST(Sequential, Big_text) { + // Create data + std::string str = + "Otche nash, ize esi na nebeseh! Da svytitsa imya tvoe, da priidet tsarstvo tvoe! Da budet volya tvoya, ako na " + "nebeseh i na zemle. Hleb nas nasyshnii dazd nam dnes, i ostavi nam dolgi nasha. Yakozhe i my ostavlyaem " + "dolznikom nashim! 
I ne vvedi nas vo iskushenie, no izbavi nas ot lukavogo... Amin!"; + std::vector out(1, 0); + std::vector in(1, str); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 7); +} \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp b/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp new file mode 100644 index 00000000000..05b6347b5b1 --- /dev/null +++ b/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kolodkin_g_sentence_count_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int res{}; +}; + +} // namespace kolodkin_g_sentence_count_seq \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp b/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp new file mode 100644 index 00000000000..379de76bf08 --- /dev/null +++ b/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp @@ -0,0 +1,79 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp" + +TEST(seq_kolodkin_g_sentence_count_test, test_pipeline_run) { + // Create data + std::string str = + "verifwriefnifnil!?vfnjklererjerjkerg...vrhklererffwjklfwefwejo!vefnklvevef?wfnkrkflwewefkl!vfnklvfklevf?" + "vrrnervevrnvreiev!"; + std::vector out(1, 0); + std::vector in(1, str); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(7, out[0]); +} + +TEST(seq_kolodkin_g_sentence_count_test, test_task_run) { + // Create data + std::string str = "Hello! My name is Grisha! Good morning! 
How are you!"; + std::vector out(1, 0); + std::vector in(1, str); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp b/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp new file mode 100644 index 00000000000..55d1276fc76 --- /dev/null +++ b/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp @@ -0,0 +1,35 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool kolodkin_g_sentence_count_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = *reinterpret_cast(taskData->inputs[0]); + res = 0; + return true; +} + +bool kolodkin_g_sentence_count_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool kolodkin_g_sentence_count_seq::TestTaskSequential::run() { + internal_order_test(); + for (unsigned long i = 0; i < input_.length(); i++) { + if ((input_[i] == '.' || input_[i] == '!' || input_[i] == '?') && + ((input_[i + 1] != '.' && input_[i + 1] != '!' && input_[i + 1] != '?') || i + 1 == input_.length())) { + res++; + } + } + return true; +} + +bool kolodkin_g_sentence_count_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From 958d1c9187b6706ac42eee9fd542e655615dd32e Mon Sep 17 00:00:00 2001 From: Andrey Sotskov <125256928+flavvvour@users.noreply.github.com> Date: Tue, 29 Oct 2024 03:10:33 +0300 Subject: [PATCH 019/155] =?UTF-8?q?=D0=A1=D0=BE=D1=86=D0=BA=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=90=D0=BD=D0=B4=D1=80=D0=B5=D0=B9.=20=D0=97=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?= =?UTF-8?q?=D1=82=2010.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D=D0=BB?= =?UTF-8?q?=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82?= =?UTF-8?q?=D1=80=D0=B8=D1=86=D1=8B.=20(#38)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Основные классы — `TestMPITaskSequential` и `TestMPITaskParallel` — реализуют последовательную и параллельную версию задачи. #### В функции run(): - Каждый процесс вычисляет сумму только для своей части матрицы. 
---
 .../func_tests/main.cpp | 365 ++++++++++++++++++
 .../include/ops_mpi.hpp |  59 +++
 .../perf_tests/main.cpp |  87 +++++
 .../src/ops_mpi.cpp | 106 +++++
 .../func_tests/main.cpp | 152 ++++++++
 .../include/ops_seq.hpp |  43 +++
 .../perf_tests/main.cpp |  74 ++++
 .../src/ops_seq.cpp | 113 ++++++
 8 files changed, 999 insertions(+)
 create mode 100644 tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..a3fff0e94d1
--- /dev/null
+++ b/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp
@@ -0,0 +1,365 @@
+#include
+
+#include
+#include
+#include
+
+#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp"
+
+TEST(sotskov_a_sum_element_matrix, test_constant_matrix) {
+  boost::mpi::communicator world;
+  std::vector global_result(1, 0);
+
+  std::shared_ptr taskDataPar = std::make_shared();
+
+  int rows = 1000;
+  int cols = 1000;
+  std::vector matrix(rows * cols, 5.0);
+  double output = 0.0;
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast(&rows));
+    taskDataPar->inputs_count.emplace_back(1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast(&cols));
+    taskDataPar->inputs_count.emplace_back(1);
+    taskDataPar->outputs.emplace_back(reinterpret_cast(&output));
+    
taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&output)); + taskDataSeq->outputs_count.emplace_back(1); + + sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + double exact = 5.0 * rows * cols; + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_random_matrix) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 1000; + int cols = 1000; + std::vector matrix(rows * cols); + for (int i = 0; i < rows * cols; ++i) { + matrix[i] = static_cast(rand()) / RAND_MAX; + } + double output = 0.0; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&output)); + taskDataSeq->outputs_count.emplace_back(1); + + sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + double exact = std::accumulate(matrix.begin(), matrix.end(), 0.0); + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_empty_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 0; + int cols = 0; + std::vector matrix; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + 
taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 0.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_single_element_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 1; + int cols = 1; + std::vector matrix = {7.0}; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 7.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_zero_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 10; + int cols = 10; + std::vector matrix(rows * cols, 0.0); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 0.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_mixed_values_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 5; + int cols = 5; + std::vector matrix = {1.0, -1.0, 2.0, -2.0, 3.0, -3.0, 4.0, -4.0, 5.0, -5.0, 6.0, -6.0, 7.0, + -7.0, 8.0, -8.0, 9.0, -9.0, 10.0, -10.0, 11.0, -11.0, 12.0, -12.0, 13.0}; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + 
sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + double exact = std::accumulate(matrix.begin(), matrix.end(), 0.0); + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_large_values_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 10; + int cols = 10; + std::vector matrix(rows * cols, 1e6); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + double exact = 1e6 * rows * cols; + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 4; + int total_cols = 3; + std::vector matrix(total_rows * total_cols); + + if (rank == 0) { + for (int i = 0; i < total_rows; ++i) { + for (int j = 0; j < total_cols; ++j) { + matrix[i * total_cols + j] = static_cast(i * total_cols + j + 1); + } + } + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 1 : 0); + + for (int i = start_idx; i < end_idx; ++i) { + double expected_value = i + 1; + EXPECT_EQ(matrix[i], expected_value) << "Process " << rank << " has incorrect value at index " << i; + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution_single_element_matrix) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 1; + int total_cols = 1; + std::vector matrix(total_rows * total_cols, 1.0); + + if (rank == 0) { + matrix[0] = 42.0; + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 
1 : 0); + + if (start_idx < end_idx) { + EXPECT_EQ(matrix[start_idx], 42.0) << "Process " << rank << " should have value 42."; + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution_2x3_matrix) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 2; + int total_cols = 3; + std::vector matrix(total_rows * total_cols); + + if (rank == 0) { + for (int i = 0; i < total_rows; ++i) { + for (int j = 0; j < total_cols; ++j) { + matrix[i * total_cols + j] = static_cast(i * total_cols + j + 1); + } + } + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 1 : 0); + + for (int i = start_idx; i < end_idx; ++i) { + double expected_value = i + 1; + EXPECT_EQ(matrix[i], expected_value) << "Process " << rank << " has incorrect value at index " << i; + } +} \ No newline at end of file diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp b/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..a52b879d7ed --- /dev/null +++ b/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp @@ -0,0 +1,59 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sotskov_a_sum_element_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_matrix(const std::vector& matrix, int rows, int cols); + + private: + double sum_elements(const std::vector& matrix); + + std::vector matrix_; + int rows_{}; + int cols_{}; + double result_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_matrix(const std::vector& matrix, int rows, int cols); + + private: + double parallel_sum_elements(const std::vector& matrix); + + std::vector matrix_; + int rows_{}; + int cols_{}; + double local_result_{}; + double global_result_{}; + + boost::mpi::communicator world; +}; + +} // namespace sotskov_a_sum_element_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..349dff7971a --- /dev/null +++ b/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp" + +TEST(sotskov_a_sum_element_matrix, test_pipeline_run) { + boost::mpi::communicator world; + int rows = 1000; + int cols = 1000; + std::vector matrix(rows * cols, 1.0); + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(matrix.data())); + 
taskDataPar->inputs.push_back(reinterpret_cast(&rows)); + taskDataPar->inputs.push_back(reinterpret_cast(&cols)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + auto exact = static_cast(rows * cols); + EXPECT_NEAR(output, exact, 1e-4); + } +} + +TEST(sotskov_a_sum_element_matrix, test_task_run) { + boost::mpi::communicator world; + int rows = 10000; + int cols = 10000; + std::vector matrix(rows * cols, 1.0); + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs.push_back(reinterpret_cast(&rows)); + taskDataPar->inputs.push_back(reinterpret_cast(&cols)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + auto exact = static_cast(rows * cols); + EXPECT_NEAR(output, exact, 1e-4); + } +} \ No newline at end of file diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..23751002e38 --- /dev/null +++ b/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp @@ -0,0 +1,106 @@ +#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + auto* tmp_ptr_matrix = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_rows = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_cols = reinterpret_cast(taskData->inputs[2]); + matrix_.assign(tmp_ptr_matrix, tmp_ptr_matrix + (*tmp_ptr_rows) * (*tmp_ptr_cols)); + rows_ = *tmp_ptr_rows; + cols_ = *tmp_ptr_cols; + return true; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + result_ = std::accumulate(matrix_.begin(), matrix_.end(), 0.0); + return true; +} + +bool 
sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* tmp_ptr_matrix = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_rows = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_cols = reinterpret_cast(taskData->inputs[2]); + matrix_.assign(tmp_ptr_matrix, tmp_ptr_matrix + (*tmp_ptr_rows) * (*tmp_ptr_cols)); + rows_ = *tmp_ptr_rows; + cols_ = *tmp_ptr_cols; + } + broadcast(world, rows_, 0); + broadcast(world, cols_, 0); + if (world.rank() != 0) { + matrix_.resize(rows_ * cols_); + } + broadcast(world, matrix_.data(), matrix_.size(), 0); + return true; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + local_result_ = parallel_sum_elements(matrix_); + reduce(world, local_result_, global_result_, std::plus<>(), 0); + return true; +} + +bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = global_result_; + } + return true; +} + +double sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::parallel_sum_elements(const std::vector& matrix) { + int rank = world.rank(); + int size = world.size(); + int total_elements = matrix.size(); + + int base_elements_per_process = total_elements / size; + int remainder = total_elements % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 
1 : 0); + + double local_sum = 0.0; + for (int i = start_idx; i < end_idx; ++i) { + local_sum += matrix[i]; + } + + double global_sum = 0.0; + MPI_Reduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, 0, world); + + return global_sum; +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp b/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..922a3c59176 --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp @@ -0,0 +1,152 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include +#include + +#include "seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +TEST(Sequential, Test_Sum_Large_Matrix) { + const int rows = 1000; + const int columns = 1000; + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_double(rows, columns); + std::vector reference_sum(1, 0); + + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix)); +} + +TEST(Sequential, Test_Sum_Negative_Values) { + const int rows = 10; + const int columns = 10; + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(rows, columns); + for (auto& elem : global_matrix) { + elem = -abs(elem); + } + std::vector reference_sum(1, 0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix)); +} + +TEST(Sequential, Test_Sum_Int) { + srand(static_cast(time(nullptr))); + + const int rows = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + const int columns = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(rows, columns); + std::vector reference_sum(1, 0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + 
taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix)); +} + +TEST(Sequential, Test_Sum_Double) { + srand(static_cast(time(nullptr))); + + const int rows = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + const int columns = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_double(rows, columns); + std::vector reference_sum(1, 0.0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix)); +} + +TEST(Sequential, Test_Empty_Matrix) { + std::vector reference_sum(1, 0); + std::vector empty_matrix; + + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(empty_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(empty_matrix.data())); + taskDataSeq->inputs_count.emplace_back(empty_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], 0); +} + +TEST(Sequential, Test_Zero_Columns_Rows) { + auto zero_columns = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(1, 0); + EXPECT_TRUE(zero_columns.empty()); + auto zero_rows = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(0, 1); + EXPECT_TRUE(zero_rows.empty()); +} + +TEST(Sequential, Test_Wrong_Validation) { + std::vector global_matrix; + std::vector global_sum(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataSeq->outputs_count.emplace_back(global_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_FALSE(testTask.validation()); +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp b/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..de3b47342ee --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp @@ -0,0 +1,43 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sotskov_a_sum_element_matrix_seq { + +std::vector 
create_random_matrix_int(int rows, int cols); +std::vector create_random_matrix_double(int rows, int cols); + +int sum_matrix_elements_int(const std::vector& matrix); +double sum_matrix_elements_double(const std::vector& matrix); +int random_range(int min, int max); + +class TestTaskSequentialInt : public ppc::core::Task { + public: + explicit TestTaskSequentialInt(std::shared_ptr task_data); + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_data_; + int result_{0}; +}; + +class TestTaskSequentialDouble : public ppc::core::Task { + public: + explicit TestTaskSequentialDouble(std::shared_ptr task_data); + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_data_; + double result_{0.0}; +}; + +} // namespace sotskov_a_sum_element_matrix_seq diff --git a/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp b/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..458abe1262f --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp @@ -0,0 +1,74 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +TEST(sotskov_a_sum_element_matrix, test_pipeline_run) { + const int rows = 10000; + const int columns = 10000; + + std::vector in(rows * columns, 1); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} + +TEST(sotskov_a_sum_element_matrix, test_task_run) { + const int rows = 8000; + const int columns = 8000; + + std::vector in(rows * columns, 1); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + 
perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp b/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..03aae4aab7a --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp @@ -0,0 +1,113 @@ +#include "seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +#include +#include +#include +#include + +int sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(const std::vector& matrix) { + return std::accumulate(matrix.begin(), matrix.end(), 0); +} + +double sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(const std::vector& matrix) { + return std::accumulate(matrix.begin(), matrix.end(), 0.0); +} + +int sotskov_a_sum_element_matrix_seq::random_range(int min, int max) { + static std::random_device rd; + static std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(min, max); + return dis(gen); +} + +std::vector sotskov_a_sum_element_matrix_seq::create_random_matrix_int(int rows, int cols) { + if (rows <= 0 || cols <= 0) { + return {}; + } + + std::vector matrix(rows * cols); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(-100, 100); + + std::generate(matrix.begin(), matrix.end(), [&]() { return dis(gen); }); + return matrix; +} + +std::vector sotskov_a_sum_element_matrix_seq::create_random_matrix_double(int rows, int cols) { + if (rows <= 0 || cols <= 0) { + return {}; + } + + std::vector matrix(rows * cols); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(-100.0, 100.0); + + std::generate(matrix.begin(), matrix.end(), [&]() { return dis(gen); }); + return matrix; +} + +sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::TestTaskSequentialInt( + std::shared_ptr task_data) + : Task(std::move(task_data)) {} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::pre_processing() { + internal_order_test(); + result_ = 0; + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_data_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::run() { + internal_order_test(); + result_ = std::accumulate(input_data_.begin(), input_data_.end(), 0); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::post_processing() { + internal_order_test(); + if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) { + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; + } + return false; +} + +sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::TestTaskSequentialDouble( + std::shared_ptr task_data) + : Task(std::move(task_data)) {} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::pre_processing() { + internal_order_test(); + result_ = 0.0; + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_data_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::run() { + internal_order_test(); + 
result_ = std::accumulate(input_data_.begin(), input_data_.end(), 0.0);
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::post_processing() {
+  internal_order_test();
+  if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) {
+    reinterpret_cast<double*>(taskData->outputs[0])[0] = result_;
+    return true;
+  }
+  return false;
+}

From 5daf9ab13b05a0ed91c1162cc013d1e8571de199 Mon Sep 17 00:00:00 2001
From: Aleksey
Date: Tue, 29 Oct 2024 18:34:42 +0300
Subject: [PATCH 020/155] Baranov Aleksey. Task 1. Variant 6. Finding the
 number of ordering violations between adjacent elements of a vector. (#6)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

SEQ: In this task we walk through the array sequentially and count the number
of ordering violations (without loss of generality, in my task the order is
strictly ascending). We simply compare adjacent elements of the array one by
one. Since the algorithm is quite simple, the tests check correct execution
with different data types and with a vector of size zero.

MPI: Here the work is split in the simplest way: every process gets its own
data buffer, into which the root process scatters parts of the source vector.
Inside the non-root processes the task reduces to the sequential one above.
Once processing is finished, the results from the non-root processes are
summed in the root process. To keep the algorithm correct, the data is sent
with an offset of one element, so that no errors occur at the seams between
the split vectors; a standalone sketch of this splitting idea follows. The
tests cover different data types and the critical cases (array size equal
to 0 and vector size equal to the number of processes).
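To make the boundary handling concrete, here is a minimal standalone sketch
of that splitting idea (plain C++ without MPI or the task framework;
count_violations and the chunk loop are illustrative, not code from this
patch). Every chunk after the first is extended one element to the left, so
each adjacent pair is examined by exactly one chunk:

#include <cstddef>
#include <iostream>
#include <vector>

// Violations of ascending order: pairs (v[i], v[i+1]) with v[i+1] < v[i].
static int count_violations(const std::vector<int>& v, std::size_t begin, std::size_t end) {
  int num = 0;
  for (std::size_t i = begin; i + 1 < end; ++i) {
    if (v[i + 1] < v[i]) {
      ++num;
    }
  }
  return num;
}

int main() {
  const std::vector<int> v = {3, 1, 4, 1, 5, 9, 2, 6};
  const int procs = 3;  // stand-in for world.size()
  const std::size_t base = v.size() / procs;
  const std::size_t rem = v.size() % procs;

  int total = 0;
  std::size_t begin = 0;
  for (int rank = 0; rank < procs; ++rank) {
    const std::size_t end = begin + base + (static_cast<std::size_t>(rank) < rem ? 1 : 0);
    // One-element overlap to the left covers the pair at each seam exactly once.
    const std::size_t lo = (rank == 0) ? begin : begin - 1;
    total += count_violations(v, lo, end);
    begin = end;
  }

  // Both print 3: the chunked count matches the sequential count.
  std::cout << total << " == " << count_violations(v, 0, v.size()) << '\n';
  return 0;
}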
--- .../func_tests/main.cpp | 283 ++++++++++++++++++ .../include/header.hpp | 30 ++ .../perf_tests/main.cpp | 77 +++++ .../src/source.cpp | 100 +++++++ .../func_tests/main.cpp | 158 ++++++++++ .../include/header.hpp | 28 ++ .../perf_tests/main.cpp | 94 ++++++ .../src/source.cpp | 53 ++++ 8 files changed, 823 insertions(+) create mode 100644 tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp create mode 100644 tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp create mode 100644 tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp create mode 100644 tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..714e9513fe6 --- /dev/null +++ b/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,283 @@ +#include + +#include "mpi/baranov_a_num_of_orderly_violations/include/header.hpp" + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_0_int) { + const int N = 0; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_100_int) { + const int N = 100; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_10000_int) { + const int N = 10000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + 
std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_0_double) { + const int N = 0; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_10000_double) { + const int N = 10000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_size_of_vec_is_equal_to_world_size) { + // Create data + boost::mpi::communicator world; + const int N = world.size(); + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} 
+TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_size_of_vec_is_less_than_world_size) { + // Create data + boost::mpi::communicator world; + const int N = world.size() - 1; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_100000_unsigned_int) { + const int N = 100000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_1) { + const int N = 78527; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_2) { + const int N = 2356895; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + 
data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_3) { + const int N = 17; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp b/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp new file mode 100644 index 00000000000..bff2b076a02 --- /dev/null +++ b/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace baranov_a_num_of_orderly_violations_mpi { +template +class num_of_orderly_violations : public ppc::core::Task { + public: + explicit num_of_orderly_violations(std::shared_ptr taskData_) : Task(taskData_) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + cntype seq_proc(std::vector vec); + + private: + std::vector input_; + std::vector loc_vec_; + cntype num_; + boost::mpi::communicator world; + int my_loc_vec_size; +}; +} // namespace baranov_a_num_of_orderly_violations_mpi diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..737b8f4a6ca --- /dev/null +++ b/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,77 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/baranov_a_num_of_orderly_violations/src/source.cpp" +TEST(mpi_baranov_a_num_of_orderly_violations_perf_test, test_pipeline_run) { + const int count_size_vector = 10000000; + boost::mpi::communicator world; + std::vector global_vec(count_size_vector); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, global_vec.size()); + std::generate(global_vec.begin(), global_vec.end(), [&dist, &reng] { return dist(reng); }); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + // 
Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + auto temp = testMpiTaskParallel->seq_proc(global_vec); + ASSERT_EQ(temp, out[0]); + } +} +TEST(mpi_baranov_a_num_of_orderly_violations_perf_test, test_task_run) { + const int count_size_vector = 10000000; + boost::mpi::communicator world; + std::vector global_vec(count_size_vector); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, global_vec.size()); + std::generate(global_vec.begin(), global_vec.end(), [&dist, &reng] { return dist(reng); }); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + auto temp = testMpiTaskParallel->seq_proc(global_vec); + ASSERT_EQ(out[0], temp); + } +} diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp new file mode 100644 index 00000000000..dda48e2b4ee --- /dev/null +++ b/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp @@ -0,0 +1,100 @@ +#include "mpi/baranov_a_num_of_orderly_violations/include/header.hpp" + +namespace baranov_a_num_of_orderly_violations_mpi { +template +cntype num_of_orderly_violations::seq_proc(std::vector vec) { + cntype num = 0; + int n = vec.size(); + for (int i = 0; i < n - 1; ++i) { + if (vec[i + 1] < vec[i]) { + num++; + } + } + return num; +} +template +bool num_of_orderly_violations::pre_processing() { + internal_order_test(); + int myid = world.rank(); + int world_size = world.size(); + int n; + if (myid == 0) { + n = taskData->inputs_count[0]; + input_ = std::vector(n + 1); + void* ptr_r = taskData->inputs[0]; + void* ptr_d = input_.data(); + memcpy(ptr_d, ptr_r, + sizeof(iotype) * n); // there input_ is a vector of pure data not uint8 so we can scatter to loc_vectors + num_ = 0; + } + broadcast(world, n, 0); // for each proc we calculate size and then scatter + int vec_send_size = n / world_size; + int overflow_size = n % world_size; + std::vector send_counts(world_size, vec_send_size); + std::vector displs(world_size, 0); + int loc_vec_size = 0; + if (myid == 0) { + for (int i = 0; i != world_size - 1; ++i) { + if (i < overflow_size) { + ++send_counts[i]; + } + displs[i + 1] = ((send_counts[i] - 1) + 
displs[i]); + ++send_counts[i + 1]; + } + loc_vec_size = send_counts[0]; + } else { + if (myid < overflow_size) { + ++send_counts[myid]; + } + ++send_counts[myid]; + loc_vec_size = send_counts[myid]; + } + loc_vec_.reserve(loc_vec_size); + if (myid == 0) { + boost::mpi::scatterv(world, input_, send_counts, displs, loc_vec_.data(), loc_vec_size, 0); + } else { + boost::mpi::scatterv(world, loc_vec_.data(), loc_vec_size, 0); + } + my_loc_vec_size = loc_vec_size; + return true; +} +template +bool num_of_orderly_violations::run() { + internal_order_test(); + int loc_num = 0; + for (int i = 0; i < my_loc_vec_size - 1; ++i) { + if (loc_vec_[i + 1] < loc_vec_[i]) { + loc_num++; + } + } + + reduce(world, loc_num, num_, std::plus(), 0); + return true; +} +template +bool num_of_orderly_violations::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = num_; + } + return true; +} +template +bool num_of_orderly_violations::validation() { + internal_order_test(); + // Check count elements of output + if (world.rank() == 0) { + if (taskData->outputs_count[0] == 1 && taskData->inputs_count.size() == 1 && taskData->inputs_count[0] >= 0) { + return true; + } + } + return true; +} + +template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations; + +template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations; + +template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations; +} // namespace baranov_a_num_of_orderly_violations_mpi \ No newline at end of file diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..0f874e379d3 --- /dev/null +++ b/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,158 @@ +#include + +#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp" +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_0_int) { + const int N = 0; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_10_int) { + const int N = 10; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + 
ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_100_int) { + const int N = 100; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_0_double) { + const int N = 0; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_100_double) { + const int N = 100; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_1000_double) { + const int N = 1000; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + 
test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_10000_double) { + const int N = 10000; + // Create data + std::vector arr(N); + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + std::shared_ptr data_seq = std::make_shared(); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + std::vector out(1); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp b/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp new file mode 100644 index 00000000000..74c14417e4a --- /dev/null +++ b/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp @@ -0,0 +1,28 @@ +#pragma once +#include +#include +#include +#include + +#include "core/task/include/task.hpp" +namespace baranov_a_num_of_orderly_violations_seq { +template +class num_of_orderly_violations : public ppc::core::Task { + public: + explicit num_of_orderly_violations(std::shared_ptr taskData_) : Task(taskData_) {} + bool pre_processing() override; + + bool validation() override; + + bool run() override; + + bool post_processing() override; + + cntype seq_proc(std::vector vec); + + private: + std::vector input_; + cntype num_; +}; + +} // namespace baranov_a_num_of_orderly_violations_seq diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..d34695946ed --- /dev/null +++ b/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,94 @@ + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp" + +TEST(sequential_baranov_a_num_of_orderly_violations_perf_test, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count); + std::vector out(1, 0); + + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, in.size()); + std::generate(in.begin(), in.end(), [&dist, &reng] { return dist(reng); }); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // 
Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  auto temp = testTaskSequential->seq_proc(in);
+
+  ASSERT_EQ(temp, out[0]);
+}
+
+TEST(sequential_baranov_a_num_of_orderly_violations_perf_test, test_task_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<> dist(0, in.size());
+  std::generate(in.begin(), in.end(), [&dist, &reng] { return dist(reng); });
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  auto temp = testTaskSequential->seq_proc(in);
+
+  ASSERT_EQ(temp, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp
new file mode 100644
index 00000000000..c90a5422e84
--- /dev/null
+++ b/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp
@@ -0,0 +1,53 @@
+#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp"
+namespace baranov_a_num_of_orderly_violations_seq {
+
+template <class iotype, class cntype>
+cntype num_of_orderly_violations<iotype, cntype>::seq_proc(std::vector<iotype> vec) {
+  cntype num = 0;
+  int n = vec.size();
+  for (int i = 0; i < n - 1; ++i) {
+    // A violation of ascending order is a pair whose next element is smaller
+    // than the previous one, matching the MPI implementation of this task.
+    if (vec[i + 1] < vec[i]) {
+      ++num;
+    }
+  }
+  return num;
+}
+
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::pre_processing() {
+  internal_order_test();
+  // Init vectors
+  int n = taskData->inputs_count[0];
+  input_ = std::vector<iotype>(n);
+  void* ptr_r = taskData->inputs[0];
+  void* ptr_d = input_.data();
+  memcpy(ptr_d, ptr_r, sizeof(iotype) * n);
+  // Init value for output
+  num_ = 0;
+  return true;
+}
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::validation() {
+  internal_order_test();
+  // Check count elements of output
+
+  return (taskData->outputs_count[0] == 1);
+}
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::run() {
+  internal_order_test();
+  num_ = seq_proc(input_);
+
+  return true;
+}
+template <class iotype, class cntype>
+bool baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<iotype, cntype>::post_processing() {
+  internal_order_test();
+  reinterpret_cast<cntype*>(taskData->outputs[0])[0] = num_;
+  return true;
+}
+
+template class baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int>;
+
+template class baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int>;
+}  // namespace baranov_a_num_of_orderly_violations_seq
\ No newline at end of file
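The next patch counts sentences in a line. Judging by its functional tests
below, '.', '!' and '?' terminate a sentence, and a non-empty trailing
fragment without a terminator still counts as one sentence ("Hello world"
yields 1). A minimal standalone sketch consistent with those tests
(count_sentences is an illustrative name, not the patch's own countSentences):

#include <iostream>
#include <string>

int count_sentences(const std::string& line) {
  int count = 0;
  char last = '\0';
  for (char c : line) {
    if (c == '.' || c == '!' || c == '?') {
      ++count;
    }
    if (c != ' ') {
      last = c;  // remember the last non-space character
    }
  }
  // A trailing fragment without terminating punctuation is one more sentence.
  if (last != '\0' && last != '.' && last != '!' && last != '?') {
    ++count;
  }
  return count;
}

int main() {
  std::cout << count_sentences("Hello world. Hello world! Hello world") << '\n';  // prints 3
  std::cout << count_sentences("") << '\n';                                       // prints 0
  return 0;
}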
From 192daadaa29038e586506700fc33f8c052a8a16a Mon Sep 17 00:00:00 2001
From: Liza
Date: Wed, 30 Oct 2024 19:32:25 +0300
Subject: [PATCH 021/155] Filateva Elizaveta. Task 1. Variant 25. Counting the
 number of sentences in a line. (#46)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Description of the sequential task**
- Sequentially counts the number of sentence-terminating characters.

**Description of the MPI task**
- The string is split into substrings of equal length. Each process receives
its own substring; every process then sends its result to the root process,
which combines them and determines the final number of sentences in the line.

---
 .../func_tests/main.cpp | 347 ++++++++++++++++++
 .../include/ops_mpi.hpp | 48 +++
 .../perf_tests/main.cpp | 91 +++++
 .../src/ops_mpi.cpp | 104 ++++++
 .../func_tests/main.cpp | 153 ++++++++
 .../include/ops_seq.hpp | 23 ++
 .../perf_tests/main.cpp | 89 +++++
 .../src/ops_seq.cpp | 38 ++
 8 files changed, 893 insertions(+)
 create mode 100644 tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp
 create mode 100644 tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp
 create mode 100644 tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp
 create mode 100644 tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp
 create mode 100644 tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp
 create mode 100644 tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp
 create mode 100644 tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp
diff --git a/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp b/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp
new file mode 100644
index 00000000000..e363d6b9cb7
--- /dev/null
+++ b/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp
@@ -0,0 +1,347 @@
+// Filateva Elizaveta Number_of_sentences_per_line
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp"
+
+std::string getRandomLine(int max_count) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::string line = "Hello world. How many words are in this sentence? The task of parallel programming!";
+  int count = gen() % max_count;
+  for (int i = 0; i < count; ++i) {
+    line += line;
+  }
+  return line;
+}
+
+TEST(filateva_e_number_sentences_line_mpi, Test_countSentences) {
+  std::string line = "Hello world. How many words are in this sentence? 
The task of parallel programming!"; + int count = filateva_e_number_sentences_line_mpi::countSentences(line); + ASSERT_EQ(3, count); +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_1) { + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_2) { + boost::mpi::communicator world; + std::string line = "Hello world"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_3) { + boost::mpi::communicator world; + std::string line = "Hello world!"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_4) { + boost::mpi::communicator world; + std::string line = "Hello world?"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, empty_string) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(0, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_1) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_2) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(3); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_3) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(5); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + 
taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_4) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(10); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, sentence_without_dot) { + boost::mpi::communicator world; + std::string line = "Hello world. Hello world! 
Hello world"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(3, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp b/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp new file mode 100644 index 00000000000..97a7a689ae5 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace filateva_e_number_sentences_line_mpi { + +int countSentences(std::string line); + +class NumberSentencesLineSequential : public ppc::core::Task { + public: + explicit NumberSentencesLineSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + int sentence_count; +}; + +class NumberSentencesLineParallel : public ppc::core::Task { + public: + explicit NumberSentencesLineParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + std::string local_line; + int sentence_count; + boost::mpi::communicator world; +}; + +} // namespace filateva_e_number_sentences_line_mpi \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp b/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp new file mode 100644 index 00000000000..d49eb9a7526 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp @@ -0,0 +1,91 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp" + +TEST(filateva_e_number_sentences_line_mpi, test_pipeline_run) { + int count = 22; + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + for (int i = 0; i < count; i++) { + line += line; + } + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto NumS = std::make_shared(taskDataPar); + ASSERT_EQ(NumS->validation(), true); + NumS->pre_processing(); + NumS->run(); + NumS->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf 
results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4194304, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, test_task_run) { + int count = 22; + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + for (int i = 0; i < count; i++) { + line += line; + } + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto NumS = std::make_shared(taskDataPar); + ASSERT_EQ(NumS->validation(), true); + NumS->pre_processing(); + NumS->run(); + NumS->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4194304, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp b/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp new file mode 100644 index 00000000000..8453deb8568 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp @@ -0,0 +1,104 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp" + +#include +#include +#include +#include + +int filateva_e_number_sentences_line_mpi::countSentences(std::string line) { + int count = 0; + for (long unsigned int i = 0; i < line.size(); ++i) { + if (line[i] == '.' || line[i] == '?' || line[i] == '!') { + ++count; + } + } + return count; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::pre_processing() { + internal_order_test(); + // Init vectors + line = std::string(std::move(reinterpret_cast(taskData->inputs[0]))); + sentence_count = 0; + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::run() { + internal_order_test(); + sentence_count = countSentences(line); + if (!line.empty() && line.back() != '.' && line.back() != '?' 
&& line.back() != '!') {
+    ++sentence_count;
+  }
+  return true;
+}
+
+bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = sentence_count;
+  return true;
+}
+
+bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    line = std::string(reinterpret_cast<char *>(taskData->inputs[0]));
+  }
+
+  sentence_count = 0;
+  return true;
+}
+
+bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::run() {
+  internal_order_test();
+  unsigned int delta = 0;
+  unsigned int remains = 0;
+  int local_sentence_count;
+  if (world.rank() == 0 && world.size() > 1) {
+    delta = line.size() / (world.size() - 1);
+    remains = line.size() % (world.size() - 1);
+  } else if (world.rank() == 0 && world.size() == 1) {
+    remains = line.size();
+  }
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    // Rank 0 keeps the first `remains` characters and sends one
+    // `delta`-sized chunk of the rest to each of the other processes.
+    for (int proc = 0; proc < (world.size() - 1); proc++) {
+      world.send(proc + 1, 0, line.data() + proc * delta + remains, delta);
+    }
+    local_line = std::string(line.begin(), line.begin() + remains);
+  } else {
+    local_line = std::string(delta, '*');
+    world.recv(0, 0, local_line.data(), delta);
+  }
+
+  local_sentence_count = countSentences(local_line);
+  if (world.rank() == 0 && !line.empty() && line.back() != '.' && line.back() != '?' && line.back() != '!') {
+    ++local_sentence_count;
+  }
+  reduce(world, local_sentence_count, sentence_count, std::plus<>(), 0);
+  return true;
+}
+
+bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int *>(taskData->outputs[0])[0] = sentence_count;
+  }
+  return true;
+}
diff --git a/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp b/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp
new file mode 100644
index 00000000000..a189d6d3df2
--- /dev/null
+++ b/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp
@@ -0,0 +1,153 @@
+// Filateva Elizaveta Number_of_sentences_per_line
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp"
+
+TEST(filateva_e_number_sentences_line_seq, one_sentence_line_1) {
+  // Create data
+  std::string line = "Hello world.";
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(line.data()));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq);
+  ASSERT_EQ(NumS.validation(), true);
+  NumS.pre_processing();
+  NumS.run();
+  NumS.post_processing();
+  ASSERT_EQ(1, out[0]);
+}
+
+TEST(filateva_e_number_sentences_line_seq, one_sentence_line_2) {
+  // Create data
+  std::string line = "Hello world";
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(line.data()));
+  taskDataSeq->inputs_count.emplace_back(1);
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, one_sentence_line_3) { + // Create data + std::string line = "Hello world!"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, one_sentence_line_4) { + // Create data + std::string line = "Hello world?"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, several_sentence_line_1) { + // Create data + std::string line = "Hello world. How many words are in this sentence? The task of parallel programming."; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(3, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, several_sentence_line_2) { + // Create data + std::string line = "Hello world. How many words are in this sentence? 
The task of parallel programming"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(3, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, empty_string) { + // Create data + std::string line; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp b/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp new file mode 100644 index 00000000000..68e316f25c1 --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Filateva Elizaveta Number_of_sentences_per_line + +#include +#include + +#include "core/task/include/task.hpp" + +namespace filateva_e_number_sentences_line_seq { + +class NumberSentencesLine : public ppc::core::Task { + public: + explicit NumberSentencesLine(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + int sentence_count; +}; + +} // namespace filateva_e_number_sentences_line_seq \ No newline at end of file diff --git a/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp b/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp new file mode 100644 index 00000000000..a4e943b6bbe --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp" + +TEST(filateva_e_number_sentences_line_seq, test_pipeline_run) { + const int count = 20; + + // Create data + std::string line("Helo world."); + std::vector out(1, 0); + + for (int i = 0; i < count; ++i) { + line += line; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto NumS = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - 
t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1048576, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, test_task_run) { + const int count = 20; + + // Create data + std::string line("Helo world."); + std::vector out(1, 0); + + for (int i = 0; i < count; ++i) { + line += line; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto NumS = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1048576, out[0]); +} diff --git a/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp b/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp new file mode 100644 index 00000000000..158019e4d26 --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp @@ -0,0 +1,38 @@ +// Filateva Elizaveta Number_of_sentences_per_line + +#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp" + +#include + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::pre_processing() { + internal_order_test(); + // Init value for input and output + line = std::string(std::move(reinterpret_cast(taskData->inputs[0]))); + sentence_count = 0; + return true; +} + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::run() { + internal_order_test(); + for (long unsigned int i = 0; i < line.size(); ++i) { + if (line[i] == '.' || line[i] == '?' || line[i] == '!') { + ++sentence_count; + } + } + if (!line.empty() && line.back() != '.' && line.back() != '?' 
&& line.back() != '!') {
+    ++sentence_count;
+  }
+  return true;
+}
+
+bool filateva_e_number_sentences_line_seq::NumberSentencesLine::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = sentence_count;
+  return true;
+}

From cc961098a44e5d173b71f853ac8c3c68668e8819 Mon Sep 17 00:00:00 2001
From: Andrey Chernykh <132923027+andreychh@users.noreply.github.com>
Date: Wed, 30 Oct 2024 19:33:38 +0300
Subject: [PATCH 022/155] Chernykh Andrey. Task 1. Variant 5. Counting the
 number of sign alternations between values of adjacent vector elements.
 (#22)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**The task of counting the number of sign alternations between adjacent vector elements** is to count the transitions between positive and negative numbers in a one-dimensional vector of integers. For the algorithm to run correctly, the input must contain at least two elements, and the result must be a single number. The parallel version of the algorithm uses the MPI library to distribute the computation across several processes.

### Description of the sequential task:
1. The vector is traversed from the first element to the next-to-last one.
2. For each pair of adjacent elements, the algorithm checks whether the sign changed. This is done with an XOR operation (if the result is less than zero, the signs differ).
3. Each time a sign change is detected, the counter is incremented by one.
4. After the traversal completes, the counter value is the task's result: the total number of sign alternations. (A minimal sketch of this sign-change check is given after the MPI description below.)

### Description of the MPI task:
1. **Data partitioning:** The rank-0 process splits the input vector into chunks. Every chunk except the last also includes the first element of the next chunk, so that a possible sign alternation at a chunk boundary is preserved. Each chunk is sent to its corresponding process.
2. **Computation:** Each process handles its own chunk, counting the sign alternations inside it.
3. **Result collection:** Each process returns its partial result (the number of sign alternations in its chunk) to process 0 via the `reduce` operation, which sums all the partial results.
4. **Result handling:** The rank-0 process collects the partial counts and returns the final number of alternations as the task's result.
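As a quick illustration of the sequential step above, here is a minimal self-contained sketch of the XOR sign-change check; the `count_alternations` helper name and the `main` driver are ours for illustration and are not part of this patch.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Counts sign changes between adjacent elements. For two ints a and b,
// (a ^ b) < 0 exactly when their sign bits differ, i.e. one value is
// negative and the other is non-negative.
int count_alternations(const std::vector<int>& v) {
  int count = 0;
  for (std::size_t i = 0; i + 1 < v.size(); i++) {
    if ((v[i] ^ v[i + 1]) < 0) {
      count++;
    }
  }
  return count;
}

int main() {
  std::vector<int> v{3, -2, 4, -5, -1, 6};
  std::cout << count_alternations(v) << '\n';  // prints 4
  return 0;
}
```

The example vector matches the expected value checked in the sequential functional test below (`want = 4`): the pair `{-5, -1}` has equal signs and is not counted.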
--- .../func_tests/main.cpp | 336 ++++++++++++++++ .../include/ops_mpi.hpp | 37 ++ .../perf_tests/main.cpp | 359 ++++++++++++++++++ .../src/ops_mpi.cpp | 87 +++++ .../func_tests/main.cpp | 87 +++++ .../include/ops_seq.hpp | 22 ++ .../perf_tests/main.cpp | 90 +++++ .../src/ops_seq.cpp | 32 ++ 8 files changed, 1050 insertions(+) create mode 100644 tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp create mode 100644 tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp create mode 100644 tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp create mode 100644 tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..f5917c25762 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,336 @@ +#include + +#include +#include +#include + +#include "mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + +std::vector getRandomVector(size_t size) { + auto dev = std::random_device(); + auto gen = std::mt19937(dev()); + auto dist = std::uniform_int_distribution(-100'000, 100'000); + auto result = std::vector(size); + for (auto &val : result) { + val = dist(gen); + } + return result; +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, random_input) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(100'000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, large_random_input) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto 
par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(1'000'000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, input_size_less_than_two_fails_validation) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_FALSE(seq_task.validation()); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, output_size_not_equal_one_fails_validation) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(1000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } + + if (world.rank() == 0) { + // 
Create data + auto seq_output = std::vector(); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_FALSE(seq_task.validation()); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, all_elements_are_equal) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, sign_change_at_borders_of_two_chunks) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = 
chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, sign_change_at_borders_of_three_chunks) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector{1, 1, 1, -1, -1, -1, 1, 1, 1}; + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..727cd9e1869 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace chernykh_a_num_of_alternations_signs_mpi { + +class SequentialTask : public ppc::core::Task { + public: + explicit SequentialTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input; + int result{}; +}; + +class ParallelTask : public ppc::core::Task { + public: + explicit ParallelTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input, chunk; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace chernykh_a_num_of_alternations_signs_mpi \ No newline at end of file diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..6028f113f9f --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,359 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include 
"mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_10000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_100000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(100'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_1000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = 
std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_10000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_10000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + 
+TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_100000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(100'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_1000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_10000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + 
ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..7826137dc30 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1,87 @@ +#include "mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + +#include +#include + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::pre_processing() { + internal_order_test(); + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + auto input_size = taskData->inputs_count[0]; + input = std::vector(input_ptr, input_ptr + input_size); + result = 0; + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::run() { + internal_order_test(); + auto input_size = input.size(); + for (size_t i = 0; i < input_size - 1; i++) { + if ((input[i] ^ input[i + 1]) < 0) { + result++; + } + } + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result; + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto input_size = taskData->inputs_count[0]; + auto chunk_size = input_size / world.size(); + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input = std::vector(input_ptr, input_ptr + input_size); + chunk = std::vector(input_ptr, input_ptr + chunk_size + uint32_t(world.size() > 1)); + + for (int proc = 1; proc < world.size(); proc++) { + auto start = proc * chunk_size; + auto size = (proc == world.size() - 1) ? 
input_size - start : chunk_size + 1; + world.send(proc, 0, std::vector(input_ptr + start, input_ptr + start + size)); + } + } else { + world.recv(0, 0, chunk); + } + + result = 0; + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::run() { + internal_order_test(); + auto chunk_result = 0; + auto chunk_size = chunk.size(); + for (size_t i = 0; i < chunk_size - 1; i++) { + if ((chunk[i] ^ chunk[i + 1]) < 0) { + chunk_result++; + } + } + boost::mpi::reduce(world, chunk_result, result, std::plus(), 0); + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = result; + } + return true; +} diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..d74d50c11c1 --- /dev/null +++ b/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include + +#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp" + +TEST(chernykh_a_num_of_alternations_signs_seq, correct_alternating_signs_count) { + // Create data + auto input = std::vector{3, -2, 4, -5, -1, 6}; + auto output = std::vector(1, 0); + auto want = 4; + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + ASSERT_EQ(want, output[0]); +} + +TEST(chernykh_a_num_of_alternations_signs_seq, input_size_less_than_two_fails_validation) { + // Create data + auto input = std::vector(); + auto output = std::vector(1, 0); + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data); + + ASSERT_FALSE(task.validation()); +} + +TEST(chernykh_a_num_of_alternations_signs_seq, output_size_not_equal_one_fails_validation) { + // Create data + auto input = std::vector{3, -2, 4, -5, -1, 6}; + auto output = std::vector(); + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data); + + ASSERT_FALSE(task.validation()); +} + +TEST(chernykh_a_num_of_alternations_signs_seq, all_elements_are_equal) { + // Create data + auto input = std::vector(5, 0); + auto output = std::vector(1, 0); + auto want = 0; + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + 
task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + ASSERT_EQ(want, output[0]); +} diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp new file mode 100644 index 00000000000..e029d9e5017 --- /dev/null +++ b/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace chernykh_a_num_of_alternations_signs_seq { + +class Task : public ppc::core::Task { + public: + explicit Task(std::shared_ptr task_data) : ppc::core::Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input; + int result{}; +}; + +} // namespace chernykh_a_num_of_alternations_signs_seq \ No newline at end of file diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..d1f45630c8f --- /dev/null +++ b/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,90 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp" + +TEST(chernykh_a_num_of_alternations_signs_seq, test_pipeline_run) { + // Create data + auto input = std::vector(10'000'000, 0); + auto output = std::vector(1, 0); + auto want = 0; + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = std::make_shared(task_data); + + ASSERT_TRUE(task->validation()); + ASSERT_TRUE(task->pre_processing()); + ASSERT_TRUE(task->run()); + ASSERT_TRUE(task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = std::chrono::high_resolution_clock::now(); + perf_attributes->current_timer = [&] { + auto current = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current - start).count(); + return static_cast(duration) * 1e-9; + }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(want, output[0]); +} + +TEST(chernykh_a_num_of_alternations_signs_seq, test_task_run) { + // Create data + auto input = std::vector(10'000'000, 0); + auto output = std::vector(1, 0); + auto want = 0; + + // Create TaskData + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input.data())); + task_data->inputs_count.emplace_back(input.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + // Create Task + auto task = 
std::make_shared<chernykh_a_num_of_alternations_signs_seq::Task>(task_data);
+
+  ASSERT_TRUE(task->validation());
+  ASSERT_TRUE(task->pre_processing());
+  ASSERT_TRUE(task->run());
+  ASSERT_TRUE(task->post_processing());
+
+  // Create PerfAttributes
+  auto perf_attributes = std::make_shared<ppc::core::PerfAttr>();
+  perf_attributes->num_running = 10;
+  auto start = std::chrono::high_resolution_clock::now();
+  perf_attributes->current_timer = [&] {
+    auto current = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current - start).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create PerfResults
+  auto perf_results = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perf_analyzer = std::make_shared<ppc::core::Perf>(task);
+
+  perf_analyzer->task_run(perf_attributes, perf_results);
+  ppc::core::Perf::print_perf_statistic(perf_results);
+  ASSERT_EQ(want, output[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp
new file mode 100644
index 00000000000..b568b80e053
--- /dev/null
+++ b/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp
@@ -0,0 +1,32 @@
+#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp"
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::pre_processing() {
+  internal_order_test();
+  auto *input_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  auto input_size = taskData->inputs_count[0];
+  input = std::vector<int>(input_ptr, input_ptr + input_size);
+  result = 0;
+  return true;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::run() {
+  internal_order_test();
+  auto input_size = input.size();
+  for (size_t i = 0; i < input_size - 1; i++) {
+    if ((input[i] ^ input[i + 1]) < 0) {
+      result++;
+    }
+  }
+  return true;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int *>(taskData->outputs[0]) = result;
+  return true;
+}

From f1a25d1622eb53554ec714226478f2202dd6572c Mon Sep 17 00:00:00 2001
From: Vladislav <125855952+WladislawFilatew@users.noreply.github.com>
Date: Wed, 30 Oct 2024 19:34:46 +0300
Subject: [PATCH 023/155] Filatev Vladislav. Task 1. Variant 10. Sum of matrix
 elements. (#43)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The matrix is represented as a vector of integers (a flattened matrix).

**Description of the sequential task** - All matrix elements are summed sequentially.

**Description of the MPI task** - The integer matrix is split into equal parts. Each process receives its own part of the matrix and computes the sum of that part. All processes then send their results to the main process, which gathers them and determines the final sum for the whole matrix. (A minimal sketch of this scheme is given below.)
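As a rough illustration of the partitioning described above, here is a minimal single-process sketch that mimics the equal split and the final combine. It is not the patch's implementation: the remainder handling (leftover elements going to the last part) is our assumption for the sketch, and no actual MPI calls are made.

```cpp
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // A 3x4 matrix stored as a flattened vector, as in the task description.
  std::vector<int> matrix{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  const int num_procs = 4;  // stand-in for world.size()

  // Split into (nearly) equal parts; each simulated "process" sums its part.
  std::size_t chunk = matrix.size() / num_procs;
  std::size_t tail = matrix.size() % num_procs;  // leftovers go to the last part
  long long total = 0;
  for (int rank = 0; rank < num_procs; rank++) {
    std::size_t begin = rank * chunk;
    std::size_t end = (rank == num_procs - 1) ? begin + chunk + tail : begin + chunk;
    long long partial = std::accumulate(matrix.begin() + begin, matrix.begin() + end, 0LL);
    total += partial;  // in the MPI version this combine happens on the main process
  }
  std::cout << total << '\n';  // prints 78
  return 0;
}
```

In the real task the per-part sums would be combined by an MPI reduction on the main process rather than by the serial loop used here.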
--------- Co-authored-by: WladislawFilatew --- .../func_tests/main.cpp | 293 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 +++ .../perf_tests/main.cpp | 284 +++++++++++++++++ .../src/ops_mpi.cpp | 104 +++++++ .../func_tests/main.cpp | 178 +++++++++++ .../include/ops_seq.hpp | 29 ++ .../perf_tests/main.cpp | 87 ++++++ .../src/ops_seq.cpp | 44 +++ 8 files changed, 1068 insertions(+) create mode 100644 tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp create mode 100644 tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp create mode 100644 tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp create mode 100644 tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp create mode 100644 tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..7ed222c75cc --- /dev/null +++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,293 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include + +#include +#include +#include +#include + +#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp" + +std::vector> getRandomMatrix(int size_n, int size_m) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> matrix(size_m, std::vector(size_n)); + + for (int i = 0; i < size_m; ++i) { + for (int j = 0; j < size_n; ++j) { + matrix[i][j] = gen() % 200 - 100; + } + } + return matrix; +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_10_1) { + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 10; + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixparallel(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel.validation(), true); + sumMatrixparallel.pre_processing(); + sumMatrixparallel.run(); + sumMatrixparallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(100, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_10_r) { + boost::mpi::communicator world; + const int count = 10; + std::vector out; + std::vector> in; + std::vector> refIn; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = getRandomMatrix(count, count); + refIn = in; + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel 
sumMatrixParallel(taskDataPar, world); + ASSERT_EQ(sumMatrixParallel.validation(), true); + sumMatrixParallel.pre_processing(); + sumMatrixParallel.run(); + sumMatrixParallel.post_processing(); + + if (world.rank() == 0) { + std::vector refOut; + // Create TaskData + std::shared_ptr TaskDataSeq = std::make_shared(); + refOut = std::vector(1, 0); + for (int i = 0; i < count; i++) { + TaskDataSeq->inputs.emplace_back(reinterpret_cast(refIn[i].data())); + } + TaskDataSeq->inputs_count.emplace_back(count); + TaskDataSeq->inputs_count.emplace_back(count); + TaskDataSeq->outputs.emplace_back(reinterpret_cast(refOut.data())); + TaskDataSeq->outputs_count.emplace_back(1); + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq); + ASSERT_EQ(sumMatriSeq.validation(), true); + sumMatriSeq.pre_processing(); + sumMatriSeq.run(); + sumMatriSeq.post_processing(); + + ASSERT_EQ(out[0], refOut[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_20_r) { + boost::mpi::communicator world; + const int size_m = 10; + const int size_n = 20; + std::vector out; + std::vector> in; + std::vector> refIn; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = getRandomMatrix(size_n, size_m); + refIn = in; + out = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(size_n); + taskDataPar->inputs_count.emplace_back(size_m); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world); + ASSERT_EQ(sumMatrixParallel.validation(), true); + sumMatrixParallel.pre_processing(); + sumMatrixParallel.run(); + sumMatrixParallel.post_processing(); + + if (world.rank() == 0) { + std::vector refOut; + // Create TaskData + std::shared_ptr TaskDataSeq = std::make_shared(); + refOut = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + TaskDataSeq->inputs.emplace_back(reinterpret_cast(refIn[i].data())); + } + TaskDataSeq->inputs_count.emplace_back(size_n); + TaskDataSeq->inputs_count.emplace_back(size_m); + TaskDataSeq->outputs.emplace_back(reinterpret_cast(refOut.data())); + TaskDataSeq->outputs_count.emplace_back(1); + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq); + ASSERT_EQ(sumMatriSeq.validation(), true); + sumMatriSeq.pre_processing(); + sumMatriSeq.run(); + sumMatriSeq.post_processing(); + + ASSERT_EQ(out[0], refOut[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_20_10_r) { + boost::mpi::communicator world; + const int size_m = 20; + const int size_n = 10; + std::vector out; + std::vector> in; + std::vector> refIn; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = getRandomMatrix(size_n, size_m); + refIn = in; + out = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(size_n); + taskDataPar->inputs_count.emplace_back(size_m); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world); + ASSERT_EQ(sumMatrixParallel.validation(), true); + sumMatrixParallel.pre_processing(); + 
sumMatrixParallel.run(); + sumMatrixParallel.post_processing(); + + if (world.rank() == 0) { + std::vector refOut; + // Create TaskData + std::shared_ptr TaskDataSeq = std::make_shared(); + refOut = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + TaskDataSeq->inputs.emplace_back(reinterpret_cast(refIn[i].data())); + } + TaskDataSeq->inputs_count.emplace_back(size_n); + TaskDataSeq->inputs_count.emplace_back(size_m); + TaskDataSeq->outputs.emplace_back(reinterpret_cast(refOut.data())); + TaskDataSeq->outputs_count.emplace_back(1); + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq); + ASSERT_EQ(sumMatriSeq.validation(), true); + sumMatriSeq.pre_processing(); + sumMatriSeq.run(); + sumMatriSeq.post_processing(); + + ASSERT_EQ(out[0], refOut[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_1_1_r) { + boost::mpi::communicator world; + const int size_m = 1; + const int size_n = 1; + std::vector out; + std::vector> in; + std::vector> refIn; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = getRandomMatrix(size_n, size_m); + refIn = in; + out = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(size_n); + taskDataPar->inputs_count.emplace_back(size_m); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world); + ASSERT_EQ(sumMatrixParallel.validation(), true); + sumMatrixParallel.pre_processing(); + sumMatrixParallel.run(); + sumMatrixParallel.post_processing(); + + if (world.rank() == 0) { + std::vector refOut; + // Create TaskData + std::shared_ptr TaskDataSeq = std::make_shared(); + refOut = std::vector(1, 0); + for (int i = 0; i < size_m; i++) { + TaskDataSeq->inputs.emplace_back(reinterpret_cast(refIn[i].data())); + } + TaskDataSeq->inputs_count.emplace_back(size_n); + TaskDataSeq->inputs_count.emplace_back(size_m); + TaskDataSeq->outputs.emplace_back(reinterpret_cast(refOut.data())); + TaskDataSeq->outputs_count.emplace_back(1); + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq); + ASSERT_EQ(sumMatriSeq.validation(), true); + sumMatriSeq.pre_processing(); + sumMatriSeq.run(); + sumMatriSeq.post_processing(); + + ASSERT_EQ(out[0], refOut[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Empty_Matrix) { + boost::mpi::communicator world; + const int count = 0; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixparallel(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel.validation(), true); + sumMatrixparallel.pre_processing(); + sumMatrixparallel.run(); + sumMatrixparallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(0, out[0]); + } +} \ No newline at end of file diff --git 
a/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..734168deffb --- /dev/null +++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Filatev Vladislav Sum_of_matrix_elements +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace filatev_v_sum_of_matrix_elements_mpi { + +class SumMatrixSeq : public ppc::core::Task { + public: + explicit SumMatrixSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix; + long long summ = 0; + int size_n, size_m; +}; + +class SumMatrixParallel : public ppc::core::Task { + public: + explicit SumMatrixParallel(std::shared_ptr taskData_, boost::mpi::communicator world) + : Task(std::move(taskData_)), world(std::move(world)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix; + long long summ = 0; + std::vector local_vector; + int size_n, size_m; + boost::mpi::communicator world; +}; + +} // namespace filatev_v_sum_of_matrix_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..27f65019e1d --- /dev/null +++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,284 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp" + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_2000) { + const int count = 2000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_2000) { + const int count = 2000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + 
std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 30; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_3000) { + const int count = 3000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_3000) { + const int count = 3000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + 
sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 30; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_4000) { + const int count = 4000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} + +TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_4000) { + const int count = 4000; + boost::mpi::communicator world; + std::vector out; + std::vector> in; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = std::vector>(count, std::vector(count, 1)); + out = std::vector(1, 0); + for (int i = 0; i < count; i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->inputs_count.emplace_back(count); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto sumMatrixparallel = + std::make_shared(taskDataPar, world); + ASSERT_EQ(sumMatrixparallel->validation(), true); + sumMatrixparallel->pre_processing(); + sumMatrixparallel->run(); + sumMatrixparallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 30; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} \ No newline at end of file diff --git 
a/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp
new file mode 100644
index 00000000000..bbcdc4b0779
--- /dev/null
+++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp
@@ -0,0 +1,104 @@
+// Filatev Vladislav Sum_of_matrix_elements
+#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp"
+
+#include <algorithm>
+
+#include <functional>
+#include <numeric>
+#include <vector>
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::pre_processing() {
+  internal_order_test();
+
+  summ = 0;
+  size_n = taskData->inputs_count[0];
+  size_m = taskData->inputs_count[1];
+
+  for (int i = 0; i < size_m; ++i) {
+    auto* temp = reinterpret_cast<int*>(taskData->inputs[i]);
+
+    matrix.insert(matrix.end(), temp, temp + size_n);
+  }
+
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::run() {
+  internal_order_test();
+
+  summ = std::accumulate(matrix.begin(), matrix.end(), 0LL);
+
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::post_processing() {
+  internal_order_test();
+
+  reinterpret_cast<long long*>(taskData->outputs[0])[0] = summ;
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    size_n = taskData->inputs_count[0];
+    size_m = taskData->inputs_count[1];
+
+    for (int i = 0; i < size_m; ++i) {
+      auto* temp = reinterpret_cast<int*>(taskData->inputs[i]);
+
+      matrix.insert(matrix.end(), temp, temp + size_n);
+    }
+  }
+  summ = 0;
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::run() {
+  internal_order_test();
+  int delta = 0;
+  int ras = 0;
+
+  if (world.rank() == 0 && world.size() > 1) {
+    ras = (size_n * size_m) % (world.size() - 1);
+    delta = (size_n * size_m) / (world.size() - 1);
+  } else if (world.rank() == 0 && world.size() == 1) {
+    ras = (size_n * size_m);
+  }
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    // Rank 0 keeps the first 'ras' leftover elements and sends 'delta'
+    // elements to each of the remaining processes
+    for (int proc = 0; proc < (world.size() - 1); proc++) {
+      world.send(proc + 1, 0, matrix.data() + proc * delta + ras, delta);
+    }
+    local_vector = std::vector<int>(matrix.begin(), matrix.begin() + ras);
+  } else {
+    local_vector = std::vector<int>(delta);
+    world.recv(0, 0, local_vector.data(), delta);
+  }
+  long long local_summ = std::accumulate(local_vector.begin(), local_vector.end(), 0LL);
+  reduce(world, local_summ, summ, std::plus<long long>(), 0);
+  return true;
+}
+
+bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<long long*>(taskData->outputs[0])[0] = summ;
+  }
+  return true;
+}
diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..4a8f0621500
--- /dev/null
+++ b/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp
@@ -0,0 +1,178 @@
+// Filatev Vladislav Sum_of_matrix_elements
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include
"seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_10_1) { + const int count = 10; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(100, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_20_1) { + const int size_m = 10; + const int size_n = 20; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(200, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_20_10_1) { + const int size_m = 20; + const int size_n = 10; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(200, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_1_1_1) { + const int size_m = 1; + const int size_n = 1; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(1, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_20_different) { + const int size_m = 10; + const int 
size_n = 20; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + for (int i = 0; i < size_m; ++i) { + for (int j = 0; j < size_n; ++j) { + in[i][j] = (i * size_n + j + 1); + } + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(20100, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Empty_Matrix) { + const int count = 0; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(0, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp b/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..83df1ea99c6 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp @@ -0,0 +1,29 @@ +// Filatev Vladislav Sum_of_matrix_elements +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace filatev_v_sum_of_matrix_elements_seq { + +long long sumVector(std::vector vector); +std::vector> getRandomMatrix(int size_n, int size_m); + +class SumMatrix : public ppc::core::Task { + public: + explicit SumMatrix(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix; + long long summ = 0; + int size_n, size_m; +}; + +} // namespace filatev_v_sum_of_matrix_elements_seq \ No newline at end of file diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d32c3b2d1b3 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,87 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(filatev_v_sum_of_matrix_elements, test_pipeline_run) { + const int count = 10000; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); 
+ } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto sumMatrix = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrix); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(100000000, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements, test_task_run) { + const int count = 10000; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto sumMatrix = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrix); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(100000000, out[0]); +} diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..2f12ec2e3a6 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp @@ -0,0 +1,44 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include "seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::pre_processing() { + internal_order_test(); + + summ = 0; + size_n = taskData->inputs_count[0]; + size_m = taskData->inputs_count[1]; + matrix = std::vector(size_m * size_n); + + for (int i = 0; i < size_m; ++i) { + auto* temp = reinterpret_cast(taskData->inputs[i]); + + for (int j = 0; j < size_n; ++j) { + matrix[i * size_n + j] = temp[j]; + } + } + + return true; +} + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::validation() { + internal_order_test(); + + return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && taskData->outputs_count[0] == 1; +} + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::run() { + internal_order_test(); + + for (long unsigned int i = 0; i < matrix.size(); ++i) { + summ += matrix[i]; + } + + return true; +} + +bool 
filatev_v_sum_of_matrix_elements_seq::SumMatrix::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = summ; + return true; +} From ba5fca86b9f45af0445bb200f15c3261ea77291e Mon Sep 17 00:00:00 2001 From: AndreySorokin7 <129724280+AndreySorokin7@users.noreply.github.com> Date: Thu, 31 Oct 2024 03:25:45 +0300 Subject: [PATCH 024/155] Fix tests with number of processes: 3, 5, 7, 8 (#72) ![image](https://github.com/user-attachments/assets/ee0be9fe-2c9c-4979-9a72-32c8f2fc214f) Co-authored-by: AndreySorokin7 --- .../src/ops_mpi.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp index 7d509804204..58bd3d8f46c 100644 --- a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp @@ -52,8 +52,10 @@ bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential:: bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::pre_processing() { internal_order_test(); unsigned int delta = 0; + unsigned int remainder = 0; if (world.rank() == 0) { delta = taskData->inputs_count[1] / world.size(); + remainder = taskData->inputs_count[1] % world.size(); } broadcast(world, delta, 0); @@ -67,15 +69,15 @@ bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::pr } } for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_[0].data() + delta * proc, delta); - world.send(proc, 1, input_[1].data() + delta * proc, delta); + world.send(proc, 0, input_[0].data() + delta * proc + remainder, delta); + world.send(proc, 1, input_[1].data() + delta * proc + remainder, delta); } } local_input1_ = std::vector(delta); local_input2_ = std::vector(delta); if (world.rank() == 0) { - local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + delta); - local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + delta); + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + delta + remainder); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + delta + remainder); } else { world.recv(0, 0, local_input1_.data(), delta); world.recv(0, 1, local_input2_.data(), delta); From daf24992ea71d6d9d4e361fa8703b0281c590354 Mon Sep 17 00:00:00 2001 From: "Michael K." <130953568+kmichaelk@users.noreply.github.com> Date: Thu, 31 Oct 2024 03:26:01 +0300 Subject: [PATCH 025/155] Add Visual Studio files to .gitignore (#73) --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index e87fe912cdf..11a9324ee55 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,15 @@ build +out mpich cmake-build-release* cmake-build-debug* .idea/ +.vs/ .vscode/ scripts/variants.csv scripts/variants.xlsx venv* sln/ +CMakeSettings.json .DS_Store .cache From 9785ddc91f0da2d0430f68f8fde0a632ffc95d3f Mon Sep 17 00:00:00 2001 From: "Michael K." <130953568+kmichaelk@users.noreply.github.com> Date: Thu, 31 Oct 2024 03:26:17 +0300 Subject: [PATCH 026/155] Link against boost::serialization (#74) Solutions depending on `boost::serialization` are failing to build with gcc and clang, while building without any issues with MSVC. 
In relation to #10, `boost::serialization` is a transitive dependency of `boost::mpi` and is still being built, [linking with serialization library is recommended in boost docs](https://www.boost.org/doc/libs/1_77_0/doc/html/mpi/getting_started.html#mpi.getting_started.using). Fixes build of #63 --- tasks/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tasks/CMakeLists.txt b/tasks/CMakeLists.txt index 2af1a3f6206..3fe79f18ec6 100644 --- a/tasks/CMakeLists.txt +++ b/tasks/CMakeLists.txt @@ -98,7 +98,7 @@ foreach(TASK_TYPE ${LIST_OF_TASKS}) add_dependencies(${EXEC_FUNC} ppc_boost) target_link_directories(${EXEC_FUNC} PUBLIC ${CMAKE_BINARY_DIR}/ppc_boost/install/lib) if (NOT MSVC) - target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi) + target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi boost_serialization) endif () elseif ("${MODULE_NAME}" STREQUAL "tbb") add_dependencies(${EXEC_FUNC} ppc_onetbb) From 9fb18622ef862e3f407db11cdeb5ed3f6ce8c9e2 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 31 Oct 2024 01:34:55 +0100 Subject: [PATCH 027/155] Add debug tests and try to find bad tests (#82) --- scripts/run.sh | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/run.sh b/scripts/run.sh index 48fb3b0a781..048adc68708 100644 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -37,13 +37,17 @@ fi #fi #echo "NUM_PROC: " $NUM_PROC -if [[ -z "$ASAN_RUN" ]]; then - if [[ $OSTYPE == "linux-gnu" ]]; then - mpirun --oversubscribe -np 4 ./build/bin/mpi_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating - elif [[ $OSTYPE == "darwin"* ]]; then - mpirun -np 2 ./build/bin/mpi_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating +# separate tests for debug +for test_item in $(./build/bin/mpi_func_tests --gtest_list_tests | awk '/\./{ SUITE=$1 } / / { print SUITE $1 }') +do + if [[ -z "$ASAN_RUN" ]]; then + if [[ $OSTYPE == "linux-gnu" ]]; then + mpirun --oversubscribe -np 4 ./build/bin/mpi_func_tests --gtest_filter="$test_item" --gtest_repeat=10 + elif [[ $OSTYPE == "darwin"* ]]; then + mpirun -np 2 ./build/bin/mpi_func_tests --gtest_filter="$test_item" --gtest_repeat=10 + fi fi -fi +done ./build/bin/omp_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating ./build/bin/seq_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating From 7abd342257653ef75ae58c21d307c3f73fbd701e Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 31 Oct 2024 01:37:56 +0100 Subject: [PATCH 028/155] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 265cbb8364f..c015df52932 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ Navigate to a source code folder. ``` mkdir build && cd build - cmake -D USE_SEQ=ON -D USE_MPI=ON -D USE_OMP=ON -D USE_TBB=ON -D USE_STL=ON -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D USE_CPPCHECK=ON -D CMAKE_BUILD_TYPE=Release .. + cmake -D USE_SEQ=ON -D USE_MPI=ON -D USE_OMP=ON -D USE_TBB=ON -D USE_STL=ON -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D CMAKE_BUILD_TYPE=Release .. ``` *Help on CMake keys:* - `-D USE_SEQ=ON` enable `Sequential` labs (based on OpenMP's CMakeLists.txt). 
From 920488d0418d0c7254d27ec12f523313a5749c9d Mon Sep 17 00:00:00 2001
From: Anastasia Rezantseva <62182112+AnastasiaRezantseva@users.noreply.github.com>
Date: Thu, 31 Oct 2024 03:39:05 +0300
Subject: [PATCH 029/155] =?UTF-8?q?=D0=A0=D0=B5=D0=B7=D0=B0=D0=BD=D1=86?=
 =?UTF-8?q?=D0=B5=D0=B2=D0=B0=20=D0=90=D0=BD=D0=B0=D1=81=D1=82=D0=B0=D1=81?=
 =?UTF-8?q?=D0=B8=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20?=
 =?UTF-8?q?=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=209.=20=D0=A1=D0=BA?=
 =?UTF-8?q?=D0=B0=D0=BB=D1=8F=D1=80=D0=BD=D0=BE=D0=B5=20=D0=BF=D1=80=D0=BE?=
 =?UTF-8?q?=D0=B8=D0=B7=D0=B2=D0=B5=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=B2?=
 =?UTF-8?q?=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=BE=D0=B2.=20(#32)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task
The two vectors are multiplied element by element; the products are
summed to obtain the final value of the dot product.

Description of the MPI task
A two-dimensional vector of integers (std::vector<std::vector<int>>)
holds the two input vectors, which are split into equal segments
according to the number of processes. The segments are then sent to the
corresponding processes for further processing. Each process receives
its segment of the data, multiplies the two vectors element by element,
and sums the products. Afterwards the results from all processes are
gathered on the root process, where they are summed to obtain the final
value.
---
 .../func_tests/main.cpp                      | 301 ++++++++++++++++++
 .../include/ops_mpi.hpp                      |  48 +++
 .../perf_tests/main.cpp                      | 110 +++++++
 .../src/ops_mpi.cpp                          | 141 ++++++++
 .../func_tests/main.cpp                      | 225 +++++++++++++
 .../include/ops_seq.hpp                      |  25 ++
 .../perf_tests/main.cpp                      | 106 ++++++
 .../src/ops_seq.cpp                          |  48 +++
 8 files changed, 1004 insertions(+)
 create mode 100644 tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp
 create mode 100644 tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp
 create mode 100644 tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp
 create mode 100644 tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp
 create mode 100644 tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp
 create mode 100644 tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp
 create mode 100644 tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp

diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp
new file mode 100644
index 00000000000..5a08a249f45
--- /dev/null
+++ b/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp
@@ -0,0 +1,301 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp"
+
+static int offset = 0;
+
+std::vector<int> createRandomVector(int v_size) {
+  std::vector<int> vec(v_size);
+  std::mt19937 gen;
+  gen.seed((unsigned)time(nullptr) + ++offset);
+  for (int i = 0; i < v_size; i++) vec[i] = gen() % 100;
+  return vec;
+}
+
+TEST(rezantseva_a_vector_dot_product_mpi, can_scalar_multiply_vec_size_125) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_vec;
+  std::vector<int> res(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    const int count_size_vector = 125;
+    std::vector<int> v1 = createRandomVector(count_size_vector);
+    std::vector<int> v2 =
createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, can_scalar_multiply_vec_size_300) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 300; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + 
testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_vectors_not_equal) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector + 5); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } + // Create Task +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_vectors_equal_true) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } + // Create Task +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_vectorDotProduct_right) { + // Create data + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + ASSERT_EQ(58, rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2)); +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_5) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5, 6, 3}; + std::vector v2 = {4, 7, 8, 9, 5}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + 
ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_3) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(58, res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_7) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5, 14, 21, 16, 11}; + std::vector v2 = {4, 7, 8, 12, 31, 25, 9}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_empty) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {0, 0, 0}; + std::vector v2 = {0, 0, 0}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp b/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp new 
file mode 100644 index 00000000000..40945cf4f8b --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace rezantseva_a_vector_dot_product_mpi { +int vectorDotProduct(const std::vector& v1, const std::vector& v2); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_{}; + std::vector local_input1_{}, local_input2_{}; + std::vector counts_{}; + size_t num_processes_ = 0; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace rezantseva_a_vector_dot_product_mpi \ No newline at end of file diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..fef7888a861 --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,110 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp" + +static int offset = 0; +const int count_size_vector = 49000000; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_vec; + + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + std::vector res(1, 0); + global_vec = {v1, v2}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + // Create and init perf results + auto perfResults = std::make_shared(); + int answer = rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + 
perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + global_vec = {v1, v2}; + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + // int answer = res[0]; + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp new file mode 100644 index 00000000000..8f6acb58a94 --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp @@ -0,0 +1,141 @@ +// Copyright 2024 Nesterov Alexander +#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp" + +int rezantseva_a_vector_dot_product_mpi::vectorDotProduct(const std::vector& v1, const std::vector& v2) { + long long result = 0; + for (size_t i = 0; i < v1.size(); i++) result += v1[i] * v2[i]; + return result; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 && + taskData->outputs_count[0] == 1; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res = 0; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + res += input_[0][i] * input_[1][i]; + } + + 
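+ // res now holds the complete dot product, accumulated element by element.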
return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 && + taskData->outputs_count[0] == 1; + } + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + size_t total_elements = 0; + size_t delta = 0; + size_t remainder = 0; + + if (world.rank() == 0) { + total_elements = taskData->inputs_count[0]; + num_processes_ = world.size(); + delta = total_elements / num_processes_; // Calculate base size for each process + remainder = total_elements % num_processes_; // Calculate remaining elements + } + boost::mpi::broadcast(world, num_processes_, 0); + + counts_.resize(num_processes_); // Vector to store counts for each process + + if (world.rank() == 0) { + // Distribute sizes to each process + for (unsigned int i = 0; i < num_processes_; ++i) { + counts_[i] = delta + (i < remainder ? 1 : 0); // Assign 1 additional element to the first 'remainder' processes + } + } + boost::mpi::broadcast(world, counts_.data(), num_processes_, 0); + + if (world.rank() == 0) { + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + } + + res = 0; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + if (world.rank() == 0) { + size_t offset_remainder = counts_[0]; + for (unsigned int proc = 1; proc < num_processes_; proc++) { + size_t current_count = counts_[proc]; + world.send(proc, 0, input_[0].data() + offset_remainder, current_count); + world.send(proc, 1, input_[1].data() + offset_remainder, current_count); + offset_remainder += current_count; + } + } + + local_input1_ = std::vector(counts_[world.rank()]); + local_input2_ = std::vector(counts_[world.rank()]); + + if (world.rank() > 0) { + world.recv(0, 0, local_input1_.data(), counts_[world.rank()]); + world.recv(0, 1, local_input2_.data(), counts_[world.rank()]); + } else { + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + counts_[0]); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + counts_[0]); + } + + int local_res = 0; + + for (size_t i = 0; i < local_input1_.size(); i++) { + local_res += local_input1_[i] * local_input2_[i]; + } + boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp b/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp new file mode 100644 index 00000000000..ccbf17a7dc7 --- /dev/null +++ 
b/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp @@ -0,0 +1,225 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp" +static int offset = 0; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_seq, can_scalar_multiply_vec_size_10) { + const int count = 10; + // Create data + std::vector out(1, 0); + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, can_scalar_multiply_vec_size_100) { + const int count = 100; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_none_equal_size_of_vec) { + const int count = 10; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count + 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_equal_size_of_vec) { + const int count = 10; + // Create data + std::vector out(1, 0); + 
+ std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_empty_vec_product_func) { + const int count = 0; + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(0, answer); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_empty_vec_product_run) { + const int count = 0; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, v1_dot_product_v2_equal_v2_dot_product_v1) { + const int count = 50; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v2, v1); + ASSERT_EQ(answer, out[0]); +} +TEST(rezantseva_a_vector_dot_product_seq, check_run_right) { + // Create data + std::vector out(1, 0); + + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + 
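+ // Expected output checked below: 1*4 + 2*7 + 5*8 = 58.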
taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(58, out[0]); +} +TEST(rezantseva_a_vector_dot_product_seq, check_vectorDotProduct_right) { + // Create data + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + ASSERT_EQ(58, rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2)); +} diff --git a/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp b/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp new file mode 100644 index 00000000000..cf0d69b6ad8 --- /dev/null +++ b/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace rezantseva_a_vector_dot_product_seq { +int vectorDotProduct(const std::vector& v1, const std::vector& v2); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; + std::vector> input_; +}; + +} // namespace rezantseva_a_vector_dot_product_seq \ No newline at end of file diff --git a/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp b/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..31a79497b14 --- /dev/null +++ b/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp" + +static int offset = 0; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_seq, test_pipeline_run) { + const int count = 100000000; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = 
std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, test_task_run) { + const int count = 100000000; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp b/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp new file mode 100644 index 00000000000..cc375d53f30 --- /dev/null +++ b/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp @@ -0,0 +1,48 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp" + +bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 && + taskData->outputs_count[0] == 1; +} + +bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res = 0; + return true; +} + +bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + res += input_[0][i] * input_[1][i]; + } + + return true; +} + +bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return 
true; +} + +int rezantseva_a_vector_dot_product_seq::vectorDotProduct(const std::vector<int>& v1, const std::vector<int>& v2) { + long long result = 0; // accumulate in long long to avoid intermediate overflow + for (size_t i = 0; i < v1.size(); i++) result += v1[i] * v2[i]; + return static_cast<int>(result); // narrow once, explicitly +} \ No newline at end of file

From 5e5f93a2ffe9d9875787b633fe474798bcf4fb83 Mon Sep 17 00:00:00 2001
From: Arseny-Korobeinikov <113035626+Arseny-Korobeinikov@users.noreply.github.com>
Date: Thu, 31 Oct 2024 03:40:49 +0300
Subject: [PATCH 030/155] =?UTF-8?q?=D0=9A=D0=BE=D1=80=D0=BE=D0=B1=D0=B5?= =?UTF-8?q?=D0=B9=D0=BD=D0=B8=D0=BA=D0=BE=D0=B2=20=D0=90=D1=80=D1=81=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D0=B9.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.?= =?UTF-8?q?=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2015.=20=D0=9D?= =?UTF-8?q?=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC?= =?UTF-8?q?=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B?= =?UTF-8?q?=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20?= =?UTF-8?q?=D0=BF=D0=BE=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B0=D0=BC=20?= =?UTF-8?q?=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#50)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Goal: find the maximum element in each row of a matrix.

Sequential solution: loop over the rows of the matrix; for each row, find its maximum element and store it in the result vector. Position i of the result vector then holds the maximum element of row i.

Parallel solution: treat the matrix as one large flat array, divide it by the number of processes, and give each process its own part. Since the number of elements may not be divisible by the number of processes, the last process also receives the remainder of that division. Each process computes, within its part, the row maxima for every row it touches (even partially) and reduces them into the result vector. Because reduction is a collective operation and element i of the result vector expects a contribution from every process, each process must also reduce INT_MIN into those elements of the result vector it must not influence. In the end we obtain the correct result vector; a minimal sketch of this pattern is given below.
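Illustrative sketch only, not part of the patch: a self-contained Boost.MPI program showing the padded per-row reduction described above. Every name, the matrix shape, and the stand-in data are hypothetical, and unlike the real task each rank builds its own copy of the matrix instead of receiving a chunk from rank 0 through TaskData.

#include <boost/mpi.hpp>

#include <algorithm>
#include <climits>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int rows = 4;   // hypothetical demo shape
  const int cols = 6;
  const int total = rows * cols;

  // Stand-in matrix, flattened row by row; identical on every rank.
  std::vector<int> flat(total);
  for (int i = 0; i < total; ++i) flat[i] = (i * 7) % 13;

  // Contiguous chunk [begin, end) owned by this rank; the last rank
  // also takes the remainder, as described above.
  const int chunk = total / world.size();
  const int begin = world.rank() * chunk;
  const int end = (world.rank() == world.size() - 1) ? total : begin + chunk;

  std::vector<int> result(rows, INT_MIN);
  for (int row = 0; row < rows; ++row) {
    const int lo = std::max(begin, row * cols);
    const int hi = std::min(end, (row + 1) * cols);
    // A rank that touches this row (even partially) contributes the
    // maximum of its piece; every other rank contributes INT_MIN, so
    // each reduce stays a collective call involving all ranks.
    const int local = (lo < hi) ? *std::max_element(flat.begin() + lo, flat.begin() + hi) : INT_MIN;
    boost::mpi::reduce(world, local, result[row], boost::mpi::maximum<int>(), 0);
  }
  // On rank 0, result[i] is now the maximum of row i.
  return 0;
}

Run with, e.g., mpirun -np 3, rank 0 ends up with the correct per-row maxima no matter how the chunk boundaries cut across rows.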
--- .../func_tests/main_korobeinikov.cpp | 251 ++++++++++++++++++ .../include/ops_mpi_korobeinikov.hpp | 52 ++++ .../perf_tests/main_korobeinikov.cpp | 100 +++++++ .../src/ops_mpi_korobeinikov.cpp | 192 ++++++++++++++ .../func_tests/main_korobeinikov.cpp | 161 +++++++++++ .../include/ops_seq_korobeinikov.hpp | 26 ++ .../perf_tests/main_korobeinikov.cpp | 93 +++++++ .../src/ops_seq_korobeinikov.cpp | 51 ++++ 8 files changed, 926 insertions(+) create mode 100644 tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp create mode 100644 tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp create mode 100644 tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp create mode 100644 tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp create mode 100644 tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp create mode 100644 tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp create mode 100644 tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp create mode 100644 tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..b034cfd33ab --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp @@ -0,0 +1,251 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include +#include +#include + +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +TEST(max_elements_in_rows_of_matrix_mpi, Test_1_const__matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 4; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential 
testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_2_random_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 10; // not const, because reinterpret_cast does not work with const + int size_rows = 20; + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = korobeinikov_a_test_task_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_3_false_validation) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 10; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_4_empty_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 0; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar 
= std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_5_one_row_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 1; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{1, 3, 2}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} \ No newline at end of file diff --git 
a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp new file mode 100644 index 00000000000..fad4ba17edb --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp @@ -0,0 +1,52 @@ +// Copyright 2024 Korobeinikov Arseny +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korobeinikov_a_test_task_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int count_rows; + int size_rows; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::vector res; + int count_rows; + int size_rows; + int num_use_proc; + boost::mpi::communicator world; +}; + +} // namespace korobeinikov_a_test_task_mpi \ No newline at end of file diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..96e90a1c879 --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp @@ -0,0 +1,100 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +TEST(mpi_korobeinikov_a_max_elements_in_rows_of_matrix_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 100; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + std::vector right_answer(count_rows, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(count_rows * 500000, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = 
std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(mpi_res, right_answer); + } +} + +TEST(mpi_korobeinikov_a_max_elements_in_rows_of_matrix_perf_test, test_task_run) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 200; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + std::vector right_answer(count_rows, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(count_rows * 500000, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(mpi_res, right_answer); + } +} diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp new file mode 100644 index 00000000000..2dd94fefb7d --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp @@ -0,0 +1,192 @@ +// Copyright 2024 Korobeinikov Arseny +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector korobeinikov_a_test_task_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + + count_rows = (int)*taskData->inputs[1]; + if (count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + res = std::vector(count_rows, 0); + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); +} + +bool 
korobeinikov_a_test_task_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + res[i] = *std::max_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + + if (world.rank() == 0) { + count_rows = (int)*taskData->inputs[1]; + if (count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + if (count_rows != 0) { + num_use_proc = std::min(world.size(), count_rows * size_rows); + } else { + num_use_proc = world.size(); + } + delta = taskData->inputs_count[0] / num_use_proc; + } + broadcast(world, delta, 0); + broadcast(world, count_rows, 0); + if (count_rows == 0) { + return true; + } + broadcast(world, size_rows, 0); + broadcast(world, num_use_proc, 0); + + if (world.rank() == 0) { + // Init vectors + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + + for (int proc = 1; proc < num_use_proc - 1; proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + if (num_use_proc != 1) { + int proc = num_use_proc - 1; + world.send(proc, 0, input_.data() + proc * delta, delta + taskData->inputs_count[0] % num_use_proc); + } + } + + if (world.rank() == 0) { + local_input_ = std::vector(delta); + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + if (world.rank() == num_use_proc - 1 && num_use_proc != 0) { + local_input_ = std::vector(delta + (count_rows * size_rows) % num_use_proc); + world.recv(0, 0, local_input_.data(), delta + (count_rows * size_rows) % num_use_proc); + } else { + if (world.rank() < num_use_proc) { + local_input_ = std::vector(delta); + world.recv(0, 0, local_input_.data(), delta); + } + } + } + + res = std::vector(count_rows, 0); + + size_t default_local_size = 0; + if (world.rank() == 0) { + default_local_size = local_input_.size(); + } + broadcast(world, default_local_size, 0); + + if (world.rank() < num_use_proc) { + unsigned int ind = (world.rank() * default_local_size) / size_rows; + for (unsigned int i = 0; i < ind; ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + + unsigned int near_end = std::min(local_input_.size(), size_rows - (world.rank() * default_local_size) % size_rows); + int local_res; + + local_res = *std::max_element(local_input_.begin(), local_input_.begin() + near_end); + reduce(world, local_res, res[ind], boost::mpi::maximum(), 0); + ++ind; + + unsigned int k = 0; + while (local_input_.begin() + near_end + k * size_rows < local_input_.end()) { + local_res = + *std::max_element(local_input_.begin() + near_end + k * 
size_rows, + std::min(local_input_.end(), local_input_.begin() + near_end + (k + 1) * size_rows)); + reduce(world, local_res, res[ind], boost::mpi::maximum(), 0); + ++k; + ++ind; + } + + for (unsigned int i = ind; i < res.size(); ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + } else { + for (unsigned int i = 0; i < res.size(); ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..ed6c5957f54 --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp @@ -0,0 +1,161 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include + +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +TEST(max_elements_in_rows_of_matrix_seq, Test_1_without_negative_max_elemet) { + // Create data + int count_rows = 4; // not const, because reinterpret_cast does not work with const + std::vector matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {17, 2, 13, 9}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_2_with_negative_max_elemet) { + // Create data + int count_rows = 4; // not const, because reinterpret_cast does not work with const + std::vector matrix{3, 7, 5, -6, -10, -8, 15, 12, 21, -7, 0, 9}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {7, -6, 21, 9}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_3_only_zero) { + // Create data + int count_rows = 2; // not const, because 
reinterpret_cast does not work with const + std::vector matrix{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_4_empty_matrix) { + // Create data + int count_rows = 0; // not const, because reinterpret_cast does not work with const + std::vector matrix; + + std::vector seq_res(count_rows, 0); + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_5_Unequal_number_of_elements_in_rows_exeption) { + // Create data + int count_rows = 2; // not const, because reinterpret_cast does not work with const + std::vector matrix{1, 2, 3, 4, 5}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(max_elements_in_rows_of_matrix_seq, + Test_6_number_of_elements_in_the_output_is_not_equal_to_number_of_rows_exeption) { + // Create data + int count_rows = 2; // not const, because reinterpret_cast does not work with const + std::vector matrix{1, 2, 3, 4, 5}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + 
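+ // Note: outputs_count (2) actually matches count_rows here; validation() below still fails because the 5 input elements cannot form 2 equal rows.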
taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp new file mode 100644 index 00000000000..261cb6d7e1c --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Korobeinikov Arseny +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace korobeinikov_a_test_task_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int count_rows{}; + int size_rows{}; +}; + +} // namespace korobeinikov_a_test_task_seq \ No newline at end of file diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..679fb730dbd --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +TEST(sequential_korobeinikov_perf_test, test_pipeline_run) { + // Create data + int count_rows = 500; // not const, because reinterpret_cast does not work with const + std::vector matrix(count_rows * 10000, 10); + + std::vector seq_res(count_rows, 0); + std::vector right_answer = std::vector(count_rows, 10); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < seq_res.size(); i++) { + EXPECT_EQ(10, seq_res[0]); + } +} + +TEST(sequential_korobeinikov_perf_test, test_task_run) { + // Create data + int count_rows = 500; // not const, because reinterpret_cast 
does not work with const + std::vector matrix(count_rows * 100000, 10); + + std::vector seq_res(count_rows, 0); + std::vector right_answer = std::vector(count_rows, 10); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < seq_res.size(); i++) { + EXPECT_EQ(10, seq_res[0]); + } +} diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp new file mode 100644 index 00000000000..44c1e2a92a3 --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp @@ -0,0 +1,51 @@ +// Copyright 2024 Korobeinikov Arseny +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool korobeinikov_a_test_task_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + count_rows = (int)*taskData->inputs[1]; + if (count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + + res = std::vector(count_rows, 0); + return true; +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::validation() { + internal_order_test(); + + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::run() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + res[i] = *std::max_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} From 03e9c1f3ec4e320ad7396bf7b4ddc3ab1eb401fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D1=81=D0=B5=D0=BD=D0=B8=D1=8F=20=D0=91=D0=B5=D1=81?= =?UTF-8?q?=D1=85=D0=BC=D0=B5=D0=BB=D1=8C=D0=BD=D0=BE=D0=B2=D0=B0?= 
<113203195+KseniyaBeskhmelnova@users.noreply.github.com> Date: Thu, 31 Oct 2024 06:24:37 +0300 Subject: [PATCH 031/155] =?UTF-8?q?=D0=91=D0=B5=D1=81=D1=85=D0=BC=D0=B5?= =?UTF-8?q?=D0=BB=D1=8C=D0=BD=D0=BE=D0=B2=D0=B0=20=D0=9A=D1=81=D0=B5=D0=BD?= =?UTF-8?q?=D0=B8=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20?= =?UTF-8?q?=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=208.=20=D0=9D=D0=B0?= =?UTF-8?q?=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0?= =?UTF-8?q?=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5=20=D0=BE=D1=82=D0=BB=D0=B8?= =?UTF-8?q?=D1=87=D0=B0=D1=8E=D1=89=D0=B8=D1=85=D1=81=D1=8F=20=D0=BF=D0=BE?= =?UTF-8?q?=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D1=8E=20=D1=81?= =?UTF-8?q?=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5?= =?UTF-8?q?=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82?= =?UTF-8?q?=D0=BE=D1=80=D0=B0.=20(#49)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 244 ++++++++++++++++++ .../include/mpi.hpp | 53 ++++ .../perf_tests/main.cpp | 95 +++++++ .../src/mpi.cpp | 189 ++++++++++++++ .../func_tests/main.cpp | 154 +++++++++++ .../include/seq.hpp | 31 +++ .../perf_tests/main.cpp | 84 ++++++ .../src/seq.cpp | 71 +++++ 8 files changed, 921 insertions(+) create mode 100644 tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp create mode 100644 tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp create mode 100644 tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp create mode 100644 tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..385ff5877f2 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,244 @@ +#include + +#include + +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp" + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + 
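+  // On rank 0 the parallel answer is then cross-checked against a sequential run over the same data.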
testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100_with_equal_elements) { + boost::mpi::communicator world; + std::vector global_vec(100, 1); + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_10000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + 
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector reference_out(2);
+
+    // Create TaskData
+    std::shared_ptr taskDataSeq = std::make_shared();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_out.size());
+
+    // Create Task
+    beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(
+        taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_NEAR(reference_out[0], global_out[0], 1e-10);
+    ASSERT_NEAR(reference_out[1], global_out[1], 1e-10);
+  }
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_uneven_size_1001) {
+  boost::mpi::communicator world;
+  std::vector global_vec;
+  std::vector global_out(2);
+
+  // Create TaskData
+  std::shared_ptr taskDataPar = std::make_shared();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 1001;
+    global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector);
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector reference_out(2);
+
+    // Create TaskData
+    std::shared_ptr taskDataSeq = std::make_shared();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_out.size());
+
+    // Create Task
+    beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(
+        taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_NEAR(reference_out[0], global_out[0], 1e-10);
+    ASSERT_NEAR(reference_out[1], global_out[1], 1e-10);
+  }
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100000) {
+  boost::mpi::communicator world;
+  std::vector global_vec;
+  std::vector global_out(2);
+
+  // Create TaskData
+  std::shared_ptr taskDataPar = std::make_shared();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 100000;
+    global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector);
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  // Create Task
beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp new file mode 100644 index 00000000000..013a008c928 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace beskhmelnova_k_most_different_neighbor_elements_mpi { + +template +std::vector getRandomVector(int sz); + +template +int position_of_first_neighbour_seq(std::vector vector); + +template +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + DataType res[2]; +}; + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int local_input_size; + DataType res[2]; + boost::mpi::communicator world; +}; +} // namespace beskhmelnova_k_most_different_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..1f75b6a0218 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,95 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp" + +TEST(mpi_beskhmelnova_k_most_different_neighbor_elements_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + 
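+    // A constant vector keeps every neighbouring difference at zero, so the
+    // post-run checks can use the pair at the returned index directly.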
global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + int index = beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(global_vec); + ASSERT_EQ(global_vec[index], global_out[0]); + ASSERT_EQ(global_vec[index + 1], global_out[1]); + } +} + +TEST(mpi_beskhmelnova_k_most_different_neighbor_elements_perf_test, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 25000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + int index = beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(global_vec); + ASSERT_EQ(global_vec[index], global_out[0]); + ASSERT_EQ(global_vec[index + 1], global_out[1]); + } +} diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp new file mode 100644 index 00000000000..8a0d9d5a217 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp @@ -0,0 +1,189 @@ +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" + +template +std::vector beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + 
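+// Scans adjacent pairs and returns the first index i that maximizes
+// |vector[i] - vector[i + 1]|; returns -1 for vectors with fewer than two elements.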
+template +int beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(std::vector vector) { + int n = vector.size(); + if (n == 0 || n == 1) return -1; + DataType max_dif = abs(vector[0] - vector[1]); + DataType dif; + int index = 0; + for (int i = 1; i < n - 1; i++) { + dif = abs(vector[i] - vector[i + 1]); + if (dif > max_dif) { + max_dif = dif; + index = i; + } + } + return index; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input + int n = taskData->inputs_count[0]; + input_ = std::vector(n); + void* ptr_r = taskData->inputs[0]; + void* ptr_d = input_.data(); + memcpy(ptr_d, ptr_r, sizeof(DataType) * n); + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 2 && taskData->inputs_count[0] > 1; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + int index = position_of_first_neighbour_seq(input_); + if (index == -1) { + res[0] = -1; + res[1] = -1; + return true; + } + res[0] = input_[index]; + res[1] = input_[index + 1]; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + reinterpret_cast(taskData->outputs[0])[1] = res[1]; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + broadcast(world, delta, 0); + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + for (int process = 1; process < world.size(); process++) { + world.send(process, 0, input_.data() + process * delta, delta); + } + } + local_input_ = std::vector(delta); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + res[0] = 0; + res[1] = 1; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 2 && taskData->inputs_count[0] > 1; + } + return true; +} + +// Struct of 2 most different neighbour elements +template +struct NeighborDifference { + DataType first; + DataType second; + DataType dif; +}; + +template +NeighborDifference find_max_difference(const std::vector& vector) { + int n = vector.size(); + if (n == 0 || n == 1) return {1, 1, -1}; + NeighborDifference max_dif = {vector[0], vector[1], std::abs(vector[1] - vector[0])}; + for (int i = 1; i < n - 1; ++i) { + DataType dif = std::abs(vector[i + 1] - vector[i]); + if (dif > max_dif.dif) { + max_dif = {vector[i], vector[i + 1], dif}; + } + } + return max_dif; +} + +template +void reduce_max_difference(const DataType* in_data, DataType* inout_data, int* len, MPI_Datatype* dptr) { + if (in_data[2] > inout_data[2]) { + inout_data[0] = in_data[0]; + 
inout_data[1] = in_data[1]; + inout_data[2] = in_data[2]; + } +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + NeighborDifference local_result = find_max_difference(local_input_); + DataType last_element = local_input_.back(); + DataType first_element = local_input_.front(); + DataType next_first_element = 0; + DataType prev_last_element = 0; + if (world.rank() < world.size() - 1) { + world.send(world.rank() + 1, 0, last_element); + world.recv(world.rank() + 1, 0, next_first_element); + } + if (world.rank() > 0) { + world.send(world.rank() - 1, 0, first_element); + world.recv(world.rank() - 1, 0, prev_last_element); + } + if (world.rank() > 0) { + DataType dif = std::abs(first_element - prev_last_element); + if (dif > local_result.dif) local_result = {prev_last_element, first_element, dif}; + } + if (world.rank() < world.size() - 1) { + DataType dif = std::abs(next_first_element - last_element); + if (dif > local_result.dif) local_result = {last_element, next_first_element, dif}; + } + DataType local_data[3] = {local_result.first, local_result.second, local_result.dif}; + DataType global_data[3] = {0, 0, 0}; + MPI_Op custom_op; + MPI_Op_create(reinterpret_cast(&reduce_max_difference), 1, &custom_op); + if (typeid(DataType) == typeid(int)) MPI_Reduce(local_data, global_data, 3, MPI_INT, custom_op, 0, MPI_COMM_WORLD); + if (typeid(DataType) == typeid(double)) + MPI_Reduce(local_data, global_data, 3, MPI_DOUBLE, custom_op, 0, MPI_COMM_WORLD); + if (world.rank() == 0) { + res[0] = global_data[0]; + res[1] = global_data[1]; + } + MPI_Op_free(&custom_op); + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + reinterpret_cast(taskData->outputs[0])[1] = res[1]; + } + return true; +} diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..4bcd3caa5e7 --- /dev/null +++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,154 @@ +#include + +#include "seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp" + +TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_100) { + const int count = 100; + + // Create data + std::vector in(count); + std::vector out(2); + + in = beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int index = testTaskSequential.position_of_first_neighbour_seq(in); + ASSERT_EQ(in[index], out[0]); + ASSERT_EQ(in[index + 1], out[1]); +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_10000) { + const int count = 10000; + + // Create data + std::vector in(count, 
5);
+  std::vector out(2);
+
+  in = beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector(count);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  int index = testTaskSequential.position_of_first_neighbour_seq(in);
+  ASSERT_EQ(in[index], out[0]);
+  ASSERT_EQ(in[index + 1], out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_1000_equal_elements) {
+  const int count = 1000;
+  const int elem = 7;
+
+  // Create data
+  std::vector in(count, elem);
+  std::vector out(2);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(elem, out[0]);
+  ASSERT_EQ(elem, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_1_size_vector_int) {
+  const int count = 1;
+
+  // Create data
+  std::vector in(count);
+  std::vector out(2);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(-1, out[0]);
+  ASSERT_EQ(-1, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_0_size_vector_int) {
+  const int count = 0;
+
+  // Create data
+  std::vector in(count);
+  std::vector out(2);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(-1, out[0]);
+  ASSERT_EQ(-1, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_2_size_vector_int) {
+  const int count = 2;
+
+  // Create data
+  std::vector in(count);
+  std::vector 
out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(in[0], out[0]); + ASSERT_EQ(in[1], out[1]); +} diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp new file mode 100644 index 00000000000..83258e17f96 --- /dev/null +++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace beskhmelnova_k_most_different_neighbor_elements_seq { + +template +std::vector getRandomVector(int sz); + +template +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + int position_of_first_neighbour_seq(std::vector vector); + + private: + std::vector input_; + DataType res[2]; +}; + +} // namespace beskhmelnova_k_most_different_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..cc58280d96c --- /dev/null +++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,84 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp" + +TEST(sequential_beskhmelnova_k_most_different_neighbor_element_perf_test, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 1); + std::vector out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + int index = testTaskSequential->position_of_first_neighbour_seq(in); + ASSERT_EQ(in[index], out[0]); + ASSERT_EQ(in[index + 
1], out[1]); +} + +TEST(sequential_beskhmelnova_k_most_different_neighbor_element_perf_test, test_task_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 1); + std::vector out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + int index = testTaskSequential->position_of_first_neighbour_seq(in); + ASSERT_EQ(in[index], out[0]); + ASSERT_EQ(in[index + 1], out[1]); +} diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp new file mode 100644 index 00000000000..376f5d85b0d --- /dev/null +++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp @@ -0,0 +1,71 @@ +#include "seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp" + +template +std::vector beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +template +int beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential::position_of_first_neighbour_seq( + std::vector vector) { + int n = vector.size(); + if (n == 0 || n == 1) return -1; + DataType max_dif = abs(vector[0] - vector[1]); + DataType dif; + int index = 0; + for (int i = 1; i < n - 1; i++) { + dif = abs(vector[i] - vector[i + 1]); + if (dif > max_dif) { + max_dif = dif; + index = i; + } + } + return index; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input + int n = taskData->inputs_count[0]; + input_ = std::vector(n); + void* ptr_r = taskData->inputs[0]; + void* ptr_d = input_.data(); + memcpy(ptr_d, ptr_r, sizeof(DataType) * n); + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count.size() == 1 && taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 2; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential::run() { + internal_order_test(); + int index = position_of_first_neighbour_seq(input_); + if (index == -1) { + res[0] = -1; + res[1] = -1; + return true; + } + res[0] = input_[index]; + res[1] = input_[index + 1]; + return true; +} + +template +bool 
beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::post_processing() {
+  internal_order_test();
+  reinterpret_cast<DataType*>(taskData->outputs[0])[0] = res[0];
+  reinterpret_cast<DataType*>(taskData->outputs[0])[1] = res[1];
+  return true;
+}
\ No newline at end of file

From f6c46cc450fddac1fece46f26008b3c64b68ee7d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=92=D0=BB=D0=B0=D0=B4=D0=B8=D0=BC=D0=B8=D1=80=20=D0=92?=
 =?UTF-8?q?=D0=BB=D0=B0=D0=B4=D0=B8=D0=BC=D0=B8=D1=80=D0=BE=D0=B2=D0=B8?=
 =?UTF-8?q?=D1=87=20=D0=9F=D1=83=D1=82=D0=B8=D0=BD!?=
 <144924225+PutinVVV@users.noreply.github.com>
Date: Thu, 31 Oct 2024 18:41:51 +0300
Subject: [PATCH 032/155] =?UTF-8?q?=D0=A8=D1=83=D1=80=D1=8B=D0=B3=D0=B8?=
 =?UTF-8?q?=D0=BD=20=D0=A1=D0=B5=D1=80=D0=B3=D0=B5=D0=B9.=20=D0=97=D0=B0?=
 =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=20=E2=84=961.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=20=E2=84=9616.=20=D0=9D=D0=B0=D1=85?=
 =?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA?=
 =?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7?=
 =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20?=
 =?UTF-8?q?=D1=81=D1=82=D0=BE=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0?=
 =?UTF-8?q?=D1=82=D1=80=D0=B8=D1=86=D1=8B=20(#33)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The code computes the maximum value of each column of a matrix. The matrix is represented as a two-dimensional vector of integers **(std::vector<std::vector<int>>)**.

**Description of the sequential task**

Each matrix element is compared with the current maximum value of its column, and whenever a larger value is found it is stored as the new maximum. On completion the function returns a vector holding the maximum value of every column.

**Description of the MPI task**

The integer matrix is split into equal parts. Each process receives its part of the matrix and finds the maximum values in each column of that part. All processes then send their results to the root process, which collects them and determines the final maximum values for every column of the whole matrix.
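A minimal sketch of the sequential kernel described above (an illustration only: column_maxima and the exact includes are assumptions of this sketch, not code added by the patch):

#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

// Per-column maxima of a matrix stored as rows of equal length.
std::vector<int> column_maxima(const std::vector<std::vector<int>>& m) {
  if (m.empty()) return {};
  std::vector<int> res(m[0].size(), std::numeric_limits<int>::min());
  for (const auto& row : m) {                                    // visit every element once
    for (std::size_t j = 0; j < res.size() && j < row.size(); ++j) {
      res[j] = std::max(res[j], row[j]);                         // keep the running maximum per column
    }
  }
  return res;
}

In the MPI variant the same kernel runs on each process's block of rows, after which the per-block result vectors are combined element-wise on the root process.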
--------- Co-authored-by: Шурыгин Сергей --- .../func_tests/main.cpp | 264 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 ++++ .../perf_tests/main.cpp | 70 +++++ .../src/ops_mpi.cpp | 170 +++++++++++ .../func_tests/main.cpp | 209 ++++++++++++++ .../include/ops_seq.hpp | 25 ++ .../perf_tests/main.cpp | 77 +++++ .../src/ops_seq.cpp | 80 ++++++ 8 files changed, 942 insertions(+) create mode 100644 tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp create mode 100644 tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..c0c80de631e --- /dev/null +++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp @@ -0,0 +1,264 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, EmptyInputs) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, EmptyOutputs) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(4); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectInputsCountSize) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(4); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectInputsCountValue) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(0); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(4); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectOutputsCountSize) { + 
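+  // outputs_count[0] is set to 3 below while the matrix has 4 columns, so validation must fail.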
boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(4); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(3); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectOutputsCountValue) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(4); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(5); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_10x10_matrix) { + boost::mpi::communicator world; + const int count_rows = 10; + const int count_columns = 10; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_100x100_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 100; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + 
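+      // each row is registered as its own raw input pointer; the counts follow as {rows, columns}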
taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_100x500_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 500; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_3000x3000_matrix) { + boost::mpi::communicator world; + const int count_rows = 3000; + const int count_columns = 3000; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + 
std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..f92e3eaed9f --- /dev/null +++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace Shurygin_S_max_po_stolbam_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50); + static std::vector> generate_random_matrix(int rows, int columns); + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + boost::mpi::communicator world; +}; + +} // namespace Shurygin_S_max_po_stolbam_matrix_mpi diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..7692a301b68 --- /dev/null +++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp @@ -0,0 +1,70 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include 
"core/perf/include/perf.hpp" +#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi_perf_test, test_pipeline_run_max) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max; + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows = 5000; + int count_columns = 5000; + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + global_max.resize(count_columns, INT_MIN); + for (auto& row : global_matrix) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + if (world.rank() == 0) { + for (size_t j = 0; j < global_max.size(); ++j) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi_perf_test, test_task_run_max) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max; + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows = 4560; + int count_columns = 4560; + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + global_max.resize(count_columns, INT_MIN); + for (auto& row : global_matrix) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + if (world.rank() == 0) { + for (size_t j = 0; j < global_max.size(); ++j) { + ASSERT_EQ(global_max[j], 200); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..45b757260fe --- /dev/null +++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp @@ -0,0 +1,170 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + input_.resize(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_matrix[j]; + } + } + res_.resize(cols); + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + if 
(taskData->inputs.empty() || taskData->outputs.empty()) { + return false; + } + if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) { + return false; + } + if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) { + return false; + } + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t j = 0; j < input_[0].size(); j++) { + int max_val = input_[0][j]; + for (size_t i = 1; i < input_.size(); i++) { + if (input_[i][j] > max_val) { + max_val = input_[i][j]; + } + } + res_[j] = max_val; + } + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matrix[i] = res_[i]; + } + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + int rows = 0; + int cols = 0; + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + broadcast(world, rows, 0); + broadcast(world, cols, 0); + int delta = rows / world.size(); + int extra = rows % world.size(); + if (world.rank() == 0) { + input_.resize(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + input_[i].assign(input_matrix, input_matrix + cols); + } + for (int proc = 1; proc < world.size(); proc++) { + int start_row = proc * delta + std::min(proc, extra); + int num_rows = delta + (proc < extra ? 1 : 0); + for (int r = start_row; r < start_row + num_rows; r++) { + world.send(proc, 0, input_[r].data(), cols); + } + } + } + int local_rows = delta + (world.rank() < extra ? 
1 : 0); + local_input_.resize(local_rows, std::vector(cols)); + if (world.rank() == 0) { + std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin()); + } else { + for (int r = 0; r < local_rows; r++) { + world.recv(0, 0, local_input_[r].data(), cols); + } + } + res_.resize(cols); + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) + return false; + if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + } + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + // Size by res_ (the column count) rather than local_input_[0]: a rank that received + // zero rows would otherwise index into an empty vector. + std::vector local_maxes(res_.size(), INT_MIN); + for (size_t i = 0; i < local_input_.size(); i++) { + for (size_t j = 0; j < res_.size(); j++) { + local_maxes[j] = std::max(local_maxes[j], local_input_[i][j]); + } + } + if (world.rank() == 0) { + std::vector global_maxes(res_.size(), INT_MIN); + std::copy(local_maxes.begin(), local_maxes.end(), global_maxes.begin()); + for (int proc = 1; proc < world.size(); proc++) { + std::vector proc_maxes(res_.size()); + world.recv(proc, 0, proc_maxes.data(), res_.size()); + for (size_t j = 0; j < res_.size(); j++) { + global_maxes[j] = std::max(global_maxes[j], proc_maxes[j]); + } + } + std::copy(global_maxes.begin(), global_maxes.end(), res_.begin()); + } else { + world.send(0, 0, local_maxes.data(), local_maxes.size()); + } + return true; +} + +bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output_matrix); + } + return true; +} + +std::vector Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_vector(int size, + int lower_bound, + int upper_bound) { + std::vector v1(size); + for (auto& num : v1) { + num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return v1; +} + +std::vector> Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix( + int rows, int columns) { + std::vector> matrix1(rows, std::vector(columns)); + for (int i = 0; i < rows; ++i) { + matrix1[i] = generate_random_vector(columns, 1, 100); + } + for (int j = 0; j < columns; ++j) { + int random_row = std::rand() % rows; + matrix1[random_row][j] = 200; + } + return matrix1; +} \ No newline at end of file diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..b0c59511a2f --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp @@ -0,0 +1,209 @@ +// Copyright 2023 Nesterov Alexander + +#include + +#include + +#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, EmptyInputs) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, EmptyOutputs) { + std::shared_ptr taskDataSeq = std::make_shared(); +
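// outputs and outputs_count are left empty on purpose: validation() must reject this TaskData. +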
Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectInputsCountSize) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(4); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectInputsCountValue) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(0); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(4); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectOutputsCountSize) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(3); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectOutputsCountValue) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(5); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_10x10_matrix) { + const int rows = 10; + const int cols = 10; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], 
max_val); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_100x100_matrix) { + const int rows = 100; + const int cols = 100; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], 200); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_100x500_matrix) { + const int rows = 100; + const int cols = 500; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], 200); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_3000x3000_matrix) { + const int rows = 3000; + const int cols = 3000; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], 200); + } +} diff --git 
a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..f1efe718b50 --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace Shurygin_S_max_po_stolbam_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector generating_random_vector(int size, int lower_bound = 0, int upper_bound = 10); + static std::vector> generate_random_matrix(int rows, int columns); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace Shurygin_S_max_po_stolbam_matrix_seq diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..fd3c0b5fe1a --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp @@ -0,0 +1,77 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_seq_perf, test_pipeline_run) { + const int rows = 5000; + const int cols = 5000; + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(v_res[j], 200); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq_perf, test_task_run) { + const int rows = 4560; + const int cols = 4560; + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + auto perfAttr = 
std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(v_res[j], 200); + } +} diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..29c74b03456 --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp @@ -0,0 +1,80 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +#include +using namespace std::chrono_literals; + +namespace Shurygin_S_max_po_stolbam_matrix_seq { + +bool TestTaskSequential::pre_processing() { + internal_order_test(); + int rows = taskData->inputs_count[0]; + int columns = taskData->inputs_count[1]; + input_.resize(rows, std::vector(columns)); + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < columns; j++) { + input_[i][j] = input_matrix[j]; + } + } + res_.resize(columns); + return true; +} + +bool TestTaskSequential::validation() { + internal_order_test(); + if (taskData->inputs.empty() || taskData->outputs.empty()) { + return false; + } + if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) { + return false; + } + if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) { + return false; + } + return true; +} + +bool TestTaskSequential::run() { + internal_order_test(); + for (size_t j = 0; j < input_[0].size(); j++) { + int max_val = input_[0][j]; + for (size_t i = 1; i < input_.size(); i++) { + if (input_[i][j] > max_val) { + max_val = input_[i][j]; + } + } + res_[j] = max_val; + } + return true; +} + +bool TestTaskSequential::post_processing() { + internal_order_test(); + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matrix[i] = res_[i]; + } + return true; +} + +std::vector TestTaskSequential::generating_random_vector(int size, int lower_bound, int upper_bound) { + std::vector v1(size); + for (auto& num : v1) { + num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return v1; +} + +std::vector> TestTaskSequential::generate_random_matrix(int rows, int columns) { + std::vector> matrix1(rows, std::vector(columns)); + for (int i = 0; i < rows; ++i) { + matrix1[i] = generating_random_vector(columns, 1, 100); + } + for (int j = 0; j < columns; ++j) { + int random_row = std::rand() % rows; + matrix1[random_row][j] = 200; + } + return matrix1; +} +} // namespace Shurygin_S_max_po_stolbam_matrix_seq From 9a368b66c15099f80525d061636808faa98ca4de Mon Sep 17 00:00:00 2001 From: dasha1112 <67230231+dasha1112@users.noreply.github.com> Date: Thu, 31 Oct 2024 19:11:53 +0300 Subject: [PATCH 033/155] =?UTF-8?q?=D0=9A=D0=BE=D0=BB=D0=BE=D0=BA=D0=BE?= =?UTF-8?q?=D0=BB=D0=BE=D0=B2=D0=B0=20=D0=94=D0=B0=D1=80=D1=8C=D1=8F.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= 
=?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2015.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81=D0=B8?= =?UTF-8?q?=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0?= =?UTF-8?q?=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8?= =?UTF-8?q?=D1=86=D1=8B.=20(#53)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential task description: the input is a vector (the matrix) plus the number of rows and their size. The vector is split into rows, forming a two-dimensional vector of the form std::vector>, i.e. a vector whose elements are the rows of the matrix; the maximum of each row is then found and stored in the result vector. MPI task description: the input matrix is split into rows on process 0, and each process is handed one row of the matrix. Each process then performs a local search for the maximum of its row, and the per-process results are collected on process 0 with gather and stored in the result vector. --------- Co-authored-by: xinod2000 <49196752+xinod2000@users.noreply.github.com> --- .../func_tests/main.cpp | 439 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 ++ .../perf_tests/main.cpp | 89 ++++ .../src/ops_mpi.cpp | 135 ++++++ .../func_tests/main.cpp | 87 ++++ .../include/ops_seq.hpp | 24 + .../perf_tests/main.cpp | 106 +++++ .../src/ops_seq.cpp | 51 ++ 8 files changed, 978 insertions(+) create mode 100644 tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp create mode 100644 tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..38795889ad1 --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp @@ -0,0 +1,439 @@ +#include + +#include +#include +#include + +#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp" + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max1) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 3; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); +
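// Rank 0 later re-computes the maxima sequentially on the same matrix as a cross-check. +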
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max2) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 5; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max3) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 10; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + 
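// Only rank 0 fills the TaskData; the other ranks receive their slice of the matrix inside run(). +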
taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max4) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 15; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max5) { + 
boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 20; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max6) { + boost::mpi::communicator world; + int size = world.size(); + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = 10; + int count_column = 5; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * count_column; + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + 
kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Row1) { + boost::mpi::communicator world; + int size = world.size(); + int rank = world.rank(); + std::vector global_matrix; + std::vector global_max(size, 0); + int count_rows = 1; + int count_column = 10; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (rank == 0) { + const int count_size_vector = count_rows * count_column; // size of vector + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (rank == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Column1) { + boost::mpi::communicator world; + int size = world.size(); + int rank = world.rank(); + std::vector global_matrix; + std::vector global_max(size, 0); + int count_rows = 10; + int count_column = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (rank == 0) { + const int count_size_vector = count_rows * count_column; // size of vector + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (rank == 0) { + // Create data + std::vector 
reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Empty_Matrix) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = 0; + int count_column = 0; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * count_column; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + if (world.rank() == 0) { + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} \ No newline at end of file diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..8c65fc8e0c2 --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kolokolova_d_max_of_row_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::vector res; + boost::mpi::communicator world; + unsigned int delta = 0; +}; + +} // namespace kolokolova_d_max_of_row_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b540664dcdd --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov 
Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp" + +TEST(kolokolova_d_max_of_row_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + const int count_size_vector = count_rows * 2000000; + if (world.rank() == 0) { + global_matrix = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, global_max[0]); + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + const int count_size_vector = count_rows * 8500000; + if (world.rank() == 0) { + global_matrix = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..60baade22d2 --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp @@ -0,0 +1,135 @@ +#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + 
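// Illustrative sketch, not called by the task: the distribute / local-max / gather
// pattern that TestMPITaskParallel implements by hand with send()/recv() below can
// also be written with Boost.MPI collectives. The names here (sketch_row_maxima,
// flat) are ours, not part of the task's interface; 'flat' is the flattened matrix
// on rank 0 and may be empty on the other ranks.
[[maybe_unused]] static std::vector<int> sketch_row_maxima(const boost::mpi::communicator& comm,
                                                           const std::vector<int>& flat) {
  int chunk = 0;
  if (comm.rank() == 0) chunk = static_cast<int>(flat.size()) / comm.size();
  boost::mpi::broadcast(comm, chunk, 0);  // same role as broadcasting 'delta' below
  std::vector<int> mine(chunk);
  boost::mpi::scatter(comm, flat, mine.data(), chunk, 0);  // one equal slice per rank
  int local_max = mine.empty() ? 0 : mine[0];               // seed from a real element
  for (int v : mine) local_max = (v > local_max) ? v : local_max;
  std::vector<int> maxima;  // filled on rank 0 only
  boost::mpi::gather(comm, local_max, maxima, 0);
  return maxima;
}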
+std::vector kolokolova_d_max_of_row_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + std::uniform_int_distribution dist(-100, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); // draw from the intended [-100, 99] range + } + return vec; +} +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + // inputs[1] holds an int row count; read the whole int, not just its first byte + auto row_count = static_cast<size_t>(*reinterpret_cast<int*>(taskData->inputs[1])); + size_t col_count = taskData->inputs_count[0] / row_count; + + input_.resize(row_count, std::vector(col_count)); + + int* input_ptr = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < row_count; ++i) { + for (size_t j = 0; j < col_count; ++j) { + input_[i][j] = input_ptr[i * col_count + j]; + } + } + res.resize(row_count); + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return *reinterpret_cast<int*>(taskData->inputs[1]) == static_cast<int>(taskData->outputs_count[0]); +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); ++i) { + int max_value = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); ++j) { + if (input_[i][j] > max_value) { + max_value = input_[i][j]; + } + } + res[i] = max_value; + } + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + int* output_ptr = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res.size(); ++i) { + output_ptr[i] = res[i]; + } + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + int proc_rank = world.rank(); + + if (proc_rank == 0) { + // One equal chunk per process; assumes the flattened length is a multiple of + // world.size(), which every test in this patch guarantees. + delta = taskData->inputs_count[0] / world.size(); + } + + if (proc_rank == 0) { + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + } + // Init value for output + res.resize(world.size()); + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output and input + if (taskData->outputs_count[0] == 0 || taskData->inputs_count[0] == 0) return false; + } + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int proc_rank = world.rank(); + + broadcast(world, delta, 0); + + if (proc_rank == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + } + + local_input_ = std::vector(delta); + + if (proc_rank == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + int local_res = local_input_.empty() ? 0 : local_input_[0]; // seed from a real element so all-negative rows work + for (int i = 0; i < int(local_input_.size()); i++) { + if (local_res < local_input_[i]) local_res = local_input_[i]; + } + gather(world, local_res, res, 0); + return true; +} + +bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < world.size(); i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp
b/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..db125844bf8 --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp @@ -0,0 +1,87 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows1) { + int count_rows = 3; + // Create data + std::vector global_mat = {2, 5, 4, 7, 9, 3, 5, 6, 7, 9, 2, 4, 2, 5, 0}; + std::vector seq_max_vec(count_rows, 0); + std::vector ans = {9, 9, 5}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows2) { + int count_rows = 4; + // Create data + std::vector global_mat = {1, 2, 6, 11, 3, 5, 6, 3, 5, 4, 10, 12, 20, 4, 8, 2}; + std::vector seq_max_vec(count_rows, 0); + std::vector ans = {11, 6, 12, 20}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows3) { + int count_rows = 5; + // Create data + std::vector global_mat = {10, 4, 3, 9, 7, 9, 13, 4, 6, 7, 5, 9, 12, 4, 2, 1, 10, 9, 0, 8}; + std::vector seq_max_vec(count_rows, 0); + std::vector ans = {10, 13, 9, 12, 10}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} diff --git
a/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp b/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..dc8dd42cc7e --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kolokolova_d_max_of_row_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; +}; + +} // namespace kolokolova_d_max_of_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp b/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..5a20fadac16 --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +TEST(kolokolova_d_max_of_row_matrix_seq, test_pipeline_run) { + int count_rows = 200; + int size_rows = 90000; + + // Create data (an array with varying values) + std::vector global_mat; + for (int i = 0; i < count_rows; ++i) { + for (int j = 0; j < size_rows; ++j) { + global_mat.push_back(i + j); // use i + j to produce distinct values + } + } + + std::vector seq_max_vec(count_rows, 0); // vector holding the row maxima + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create performance attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // number of runs + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; // convert to seconds + }; + + // Create and init performance results + auto perfResults = std::make_shared(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + // Print performance statistics + ppc::core::Perf::print_perf_statistic(perfResults); + + // Check results + for (size_t i = 0; i < seq_max_vec.size(); i++) { + EXPECT_EQ(seq_max_vec[i], + int(size_rows + i - 1)); // row i holds i .. i + size_rows - 1, so its maximum is size_rows + i - 1 + } +} + +TEST(kolokolova_d_max_of_row_matrix_seq, test_task_run) { + int count_rows = 3000; + int size_rows = 6000; + + std::vector global_mat(count_rows * size_rows, 0); + std::vector seq_max_vec(count_rows, 0);
// vector holding the row maxima + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create performance attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // number of runs + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; // convert to seconds + }; + + // Create and init performance results + auto perfResults = std::make_shared(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); // run the task + + // Print performance statistics + ppc::core::Perf::print_perf_statistic(perfResults); + + // Check results + for (size_t i = 0; i < seq_max_vec.size(); i++) { + EXPECT_EQ(0, seq_max_vec[i]); // the matrix is all zeros, so every row maximum is 0 + } +} \ No newline at end of file diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp b/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..a52c2c38b6f --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp @@ -0,0 +1,51 @@ +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + // inputs[1] holds an int row count; read the whole int, not just its first byte + auto row_count = static_cast<size_t>(*reinterpret_cast<int*>(taskData->inputs[1])); + size_t col_count = taskData->inputs_count[0] / row_count; + + input_.resize(row_count, std::vector(col_count)); + + int* input_ptr = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < row_count; ++i) { + for (size_t j = 0; j < col_count; ++j) { + input_[i][j] = input_ptr[i * col_count + j]; + } + } + res.resize(row_count); + return true; +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + return *reinterpret_cast<int*>(taskData->inputs[1]) == static_cast<int>(taskData->outputs_count[0]); +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); ++i) { + int max_value = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); ++j) { + if (input_[i][j] > max_value) { + max_value = input_[i][j]; + } + } + res[i] = max_value; + } + return true; +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + int* output_ptr = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res.size(); ++i) { + output_ptr[i] = res[i]; + } + return true; +} From 3ed0fe245b8b11dc59fc730209f60a067d936b1d Mon Sep 17 00:00:00 2001 From: Ivan Lysov <111500735+Ivan-Lysov@users.noreply.github.com> Date: Thu, 31 Oct 2024 19:13:33 +0300 Subject: [PATCH 034/155]
=?UTF-8?q?=D0=9B=D1=8B=D1=81=D0=BE=D0=B2=20=D0=98?= =?UTF-8?q?=D0=B2=D0=B0=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.?= =?UTF-8?q?=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2020.=20=D0=98?= =?UTF-8?q?=D0=BD=D1=82=D0=B5=D0=B3=D1=80=D0=B8=D1=80=D0=BE=D0=B2=D0=B0?= =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=E2=80=93=20=D0=BC=D0=B5=D1=82=D0=BE=D0=B4?= =?UTF-8?q?=20=D1=82=D1=80=D0=B0=D0=BF=D0=B5=D1=86=D0=B8=D0=B9.=20(#41)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential variant (SEQ): the sequential version uses the run() method of TestTaskSequential, which integrates over the given interval [a, b], with epsilon controlling the accuracy. Main stages: - **Validation and data preparation:** validation() checks that the input data are well formed, including array sizes and an admissible epsilon value. - **Partitioning:** pre_processing() chooses the number of sub-intervals cnt_of_splits from the requested accuracy epsilon, sets the step h = (b - a) / cnt_of_splits, and initializes the local variables. - **Integration:** run() computes the trapezoid areas over [a, b], walking through the sub-intervals and summing the results. - **Post-processing:** post_processing() stores the integration result in the output array. Parallel variant (MPI): the parallel version uses the run() method of TestMPITaskParallel, which distributes the interval across processes, again with epsilon controlling the accuracy. Main stages (a short sketch of the composite rule follows this list): - **Data distribution:** pre_processing() splits the interval [a, b] across processes, computing per-process variables (the local start local_a and the local number of sub-intervals local_cnt_of_splits); broadcast() is used for synchronization. - **Local integration:** in run(), each process integrates its own sub-range and accumulates the local result local_res. - **Result collection:** reduce() sums the local results into res on the rank 0 process. - **Post-processing:** post_processing() stores the integration result in the output array on rank 0.
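As a reading aid, a minimal sequential sketch of the composite trapezoid rule described above (the actual integrand f and the epsilon-to-cnt_of_splits rule live in the task sources below):

    // h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2)
    double trapezoid(double (*f)(double), double a, double b, int cnt_of_splits) {
      double h = (b - a) / cnt_of_splits;
      double sum = 0.5 * (f(a) + f(b));
      for (int i = 1; i < cnt_of_splits; ++i) {
        sum += f(a + i * h);
      }
      return sum * h;
    }

The MPI variant computes the same sum, except that each rank loops only over its local_cnt_of_splits sub-intervals starting at local_a, and reduce() adds the partial sums on rank 0.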
--- .../func_tests/main.cpp | 260 ++++++++++++++++++ .../include/ops_mpi.hpp | 59 ++++ .../perf_tests/main.cpp | 83 ++++++ .../src/ops_mpi.cpp | 99 +++++++ .../func_tests/main.cpp | 196 +++++++++++++ .../include/ops_seq.hpp | 21 ++ .../perf_tests/main.cpp | 61 ++++ .../src/ops_seq.cpp | 42 +++ 8 files changed, 821 insertions(+) create mode 100644 tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp create mode 100644 tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp create mode 100644 tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp create mode 100644 tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp create mode 100644 tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp create mode 100644 tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp create mode 100644 tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp create mode 100644 tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp new file mode 100644 index 00000000000..588a41c4553 --- /dev/null +++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp @@ -0,0 +1,260 @@ +#include + +#include +#include +#include +#include + +#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_1) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -1.45; + double b = 0.0; + double epsilon = 0.000001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_2) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = 0.0; + double b = 1.45; + double epsilon = 0.000001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_3) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -10.0; + double b = 65.0; + double epsilon = 0.000001; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_4) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -5.0; + double b = 5.0; + double epsilon = 0.000001; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = 
std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_random) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + std::random_device dev; + std::mt19937 gen(dev()); + double a = (gen() % 100) / 100.0; + double b = (gen() % 100) / 100.0; + if (a == b) b += 0.1; + double epsilon = 0.0001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_InputSizeLessThan3) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + double result = 0.0; + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_InputSizeMoreThan3) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + double extra_input = 5.0; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&extra_input)); + double result = 0.0; + 
taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_OutputSizeMoreThan1) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + double result1 = 0.0; + double result2 = 0.0; + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result1)); + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result2)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_OutputSizeLessThan1) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp new file mode 100644 index 00000000000..006a91b740e --- /dev/null +++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp @@ -0,0 +1,59 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lysov_i_integration_the_trapezoid_method_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a = 0.0; + double b = 0.0; + double h = 0.0; + int cnt_of_splits = 0; + double epsilon; + double static function_square(double x) { return x * x; } + + private: + std::vector input_; + double res{}; + std::string ops; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a = 0.0; + double b = 0.0; + double h = 0.0; + int cnt_of_splits = 0; + double local_a; + int local_cnt_of_splits; + static double function_square(double x) { return x * x; } + + private: + std::vector input_, local_input_; + double res; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace lysov_i_integration_the_trapezoid_method_mpi \ No newline at end of file diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp 
b/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..5dd51ff1716 --- /dev/null +++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,83 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_mpi, test_integration_pipeline_run) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + double a = -1.45; + double b = 1.45; + double epsilon = 0.0000001; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, test_integration_task_run) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + double a = -1.45; + double b = 1.45; + double epsilon = 0.0000001; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double reference_result = 2.0; + 
ASSERT_NEAR(reference_result, global_result[0], 1e-1);
+  }
+}
diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp
new file mode 100644
index 00000000000..08f8c9c6734
--- /dev/null
+++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp
@@ -0,0 +1,99 @@
+#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp"
+
+#include <cmath>
+#include <functional>
+#include
+#include
+#include
+#include
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1);
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  a = *reinterpret_cast<double *>(taskData->inputs[0]);
+  b = *reinterpret_cast<double *>(taskData->inputs[1]);
+  epsilon = *reinterpret_cast<double *>(taskData->inputs[2]);
+  cnt_of_splits = static_cast<int>(std::abs(b - a) / epsilon);
+  h = (b - a) / cnt_of_splits;
+  input_.resize(cnt_of_splits + 1);
+  for (int i = 0; i <= cnt_of_splits; ++i) {
+    double x = a + i * h;
+    input_[i] = function_square(x);
+  }
+  return true;
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  double result = 0.0;
+  result += 0.5 * (function_square(a) + function_square(b));
+  for (int i = 1; i < cnt_of_splits; ++i) {
+    double x = a + i * h;
+    result += function_square(x);
+  }
+  result *= h;
+  res = result;
+  return true;
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<double *>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if ((taskData->inputs.size() != 3) || (taskData->outputs.size() != 1)) {
+      return false;
+    }
+    double epsilon = *reinterpret_cast<double *>(taskData->inputs[2]);
+    if (epsilon <= 0) {
+      return false;
+    }
+  }
+  return true;
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    a = *reinterpret_cast<double *>(taskData->inputs[0]);
+    b = *reinterpret_cast<double *>(taskData->inputs[1]);
+    double epsilon = *reinterpret_cast<double *>(taskData->inputs[2]);
+    cnt_of_splits = static_cast<int>(std::abs(b - a) / epsilon);
+  }
+
+  boost::mpi::broadcast(world, a, 0);
+  boost::mpi::broadcast(world, b, 0);
+  boost::mpi::broadcast(world, cnt_of_splits, 0);
+
+  h = (b - a) / cnt_of_splits;
+  // Block distribution: the first cnt_of_splits % size ranks take one extra
+  // subinterval; local_a starts at the global index of this rank's first one.
+  int base_splits = cnt_of_splits / world.size();
+  int remainder = cnt_of_splits % world.size();
+  local_cnt_of_splits = base_splits + (world.rank() < remainder ? 1 : 0);
+  int offset = world.rank() * base_splits + (world.rank() < remainder ? world.rank() : remainder);
+  local_a = a + offset * h;
+  local_input_.resize(local_cnt_of_splits + 1);
+  return true;
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  // Composite trapezoid rule on this rank's segment: the two segment
+  // endpoints get weight 0.5, interior points get weight 1.
+  double local_res = 0.5 * (function_square(local_a) + function_square(local_a + local_cnt_of_splits * h));
+  for (int i = 1; i < local_cnt_of_splits; i++) {
+    double x = local_a + i * h;
+    local_res += function_square(x);
+  }
+  local_res *= h;
+  boost::mpi::reduce(world, local_res, res, std::plus<>(), 0);
+  return true;
+}
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<double *>(taskData->outputs[0]) = res;
+  }
+  return true;
+}
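The delicate part of the parallel version above is the block partitioning when cnt_of_splits does not divide evenly by the number of processes. Below is a minimal standalone boost.mpi sketch of the same scheme (an editor's illustration, not part of the commit; the integrand and interval are the ones the tests use): the first remainder ranks each take one extra subinterval, every rank runs the composite rule on its own segment, and reduce() sums the partial areas on rank 0.

#include <boost/mpi.hpp>
#include <cmath>
#include <cstdio>
#include <functional>

static double f(double x) { return x * x; }  // same integrand as function_square

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const double a = -1.45;
  const double b = 1.45;
  const double epsilon = 1e-6;
  const int n = static_cast<int>(std::abs(b - a) / epsilon);
  const double h = (b - a) / n;

  // Block distribution with remainder: ranks below n % size get one extra
  // subinterval; offset is the global index of this rank's first subinterval.
  const int base = n / world.size();
  const int rem = n % world.size();
  const int local_n = base + (world.rank() < rem ? 1 : 0);
  const int offset = world.rank() * base + (world.rank() < rem ? world.rank() : rem);
  const double local_a = a + offset * h;
  const double local_b = local_a + local_n * h;

  // Composite trapezoid rule restricted to this rank's segment.
  double local_res = 0.5 * (f(local_a) + f(local_b));
  for (int i = 1; i < local_n; ++i) {
    local_res += f(local_a + i * h);
  }
  local_res *= h;

  double res = 0.0;
  boost::mpi::reduce(world, local_res, res, std::plus<>(), 0);
  if (world.rank() == 0) {
    std::printf("%f\n", res);  // ~2.032 for x^2 over [-1.45, 1.45]
  }
  return 0;
}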
diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp new file mode 100644 index 00000000000..28c90b06220 --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp @@ -0,0 +1,196 @@ +#include + +#include +#include + +#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest) { + double a = 0.0; + double b = 1.45; + double epsilon = 1e-2; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest2) { + double a = -1.45; + double b = 0.0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest3) { + double a = -1.45; + double b = 1.45; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 2.03; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest4) { + double a = 1.45; + double b = 0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = -1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest5) { + double a = 0.0; + double b = 100.0; + double epsilon = 0.001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + 
taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 333333.333510; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest6) { + double a = -10.0; + double b = 65.0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 91875.001; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest7) { + double a = -10.0; + double b = 10.0; + double epsilon = 0.001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + std::cout << output << std::endl; + double expected_result = 666.66666; + ASSERT_NEAR(output, expected_result, 1e-2); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, InputSizeLessThan3) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + double result = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, InputSizeMoreThan3) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + double extra_input = 5.0; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&extra_input)); + double result = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, OutputSizeLessThan1) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, 
OutputSizeMoreThan1) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + double result1 = 0.0; + double result2 = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result2)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp new file mode 100644 index 00000000000..e889c5a868f --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp @@ -0,0 +1,21 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" +namespace lysov_i_integration_the_trapezoid_method_seq { +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a, b, cnt_of_splits, epsilon, h; + static double function_square(double x) { return x * x; } + + private: + std::vector input_; + double res{}; +}; +} // namespace lysov_i_integration_the_trapezoid_method_seq \ No newline at end of file diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..26bbd7e8054 --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,61 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp" +TEST(lysov_i_integration_the_trapezoid_method_seq, test_pipeline_run) { + double a = 0.0; + double b = 1.45; + double epsilon = 0.0000001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 1.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + auto testTaskSequential = + std::make_shared(taskData); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 1.0; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, test_task_run) { + double a = 0.0; + double b = 1.45; + double epsilon = 0.0000001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 
1.0;
+  taskData->outputs.push_back(reinterpret_cast<uint8_t *>(&output));
+  auto testTaskSequential =
+      std::make_shared<lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential>(taskData);
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  double expected_result = 1.0;
+  ASSERT_NEAR(output, expected_result, 1e-1);
+}
diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp
new file mode 100644
index 00000000000..633c87631fc
--- /dev/null
+++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp
@@ -0,0 +1,42 @@
+#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp"
+
+#include <cmath>
+using namespace std::chrono_literals;
+bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1);
+}
+
+bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  a = *reinterpret_cast<double *>(taskData->inputs[0]);
+  b = *reinterpret_cast<double *>(taskData->inputs[1]);
+  epsilon = *reinterpret_cast<double *>(taskData->inputs[2]);
+  cnt_of_splits = static_cast<int>(std::abs(b - a) / epsilon);
+  h = (b - a) / cnt_of_splits;
+  input_.resize(cnt_of_splits + 1);
+  for (int i = 0; i <= cnt_of_splits; ++i) {
+    double x = a + i * h;
+    input_[i] = function_square(x);
+  }
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::run() {
+  internal_order_test();
+  double result = 0.0;
+  result += 0.5 * (function_square(a) + function_square(b));
+  for (int i = 1; i < cnt_of_splits; ++i) {
+    double x = a + i * h;
+    result += function_square(x);
+  }
+  result *= h;
+  res = result;
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<double *>(taskData->outputs[0])[0] = res;
+  return true;
+}
\ No newline at end of file
From 16fc61fd101c8492c5a00ef2454ec03b50650d34 Mon Sep 17 00:00:00 2001
From: Kirius257 <113035841+Kirius257@users.noreply.github.com>
Date: Fri, 1 Nov 2024 03:37:18 +0300
Subject: [PATCH 035/155] Kholin Kirill. Task 1. Variant 8. Finding the vector
 elements that differ most in value. (#39)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Algorithm descriptions

_Sequential program:_
1. Local variables are declared to hold the largest difference between neighboring vector elements and the current index of the vector element from which that largest difference can be computed.
2. We take the current iterator (the begin iterator), the iterator following the current one, and the past-the-end iterator.
3. In a while loop we traverse the vector until we reach the end, computing the absolute difference between the two neighboring elements pointed to by the iterator following the current one and the current iterator.
4. The result of the loop is the index of the left neighbor and the largest difference in value between two neighboring elements of the vector.

_Parallel program:_
1. The vector is handed to the process with rank 0, and the share of elements to distribute to the remaining processes is computed.
2. The MPI_Bcast operation is applied to send the computed share of the vector data to the processes.
3. The collective scatter operation MPI_Scatter is used to distribute to all processes parts of the vector that are equal in size but different in content, into the corresponding data buffers.
4. Each process, following the computation algorithm given above, finds the maximum difference (call it the maximum delta) in its subvector and packs the result into the corresponding buffer.
5. The collective operation MPI_Reduce is used with MPI_MAX as one of its parameters; it finds the maximum among all the maximum deltas sent by the processes.
6. The result of MPI_Reduce, stored in the corresponding variable, is the answer to the task (a standalone sketch of this scatter/reduce pattern appears further below).
---
 .../func_tests/main.cpp                      | 528 ++++++++++++++++++
 .../include/ops_mpi.hpp                      | 279 +++++++++
 .../perf_tests/main.cpp                      | 102 ++++
 .../src/ops_mpi.cpp                          |   1 +
 .../func_tests/main.cpp                      | 291 ++++++++++
 .../include/ops_seq.hpp                      | 121 ++++
 .../perf_tests/main.cpp                      |  80 +++
 .../src/ops_seq.cpp                          |   1 +
 8 files changed, 1403 insertions(+)
 create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
 create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp
 create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp
 create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
 create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp
 create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp
 create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp
diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
new file mode 100644
index 00000000000..6e1acea30dc
--- /dev/null
+++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
@@ -0,0 +1,528 @@
+
+#include <gtest/gtest.h>
+
+#include
+#include
+#include
+
+#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp"
+
+TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_validation) {
+  int ProcRank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank);
+  const int count_size_vector = 500;
+  enum_ops::operations op = enum_ops::MAX_DIFFERENCE;
+  std::vector global_vec;
+  std::vector global_delta(1, 0);
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (ProcRank == 0) {
+    global_vec = std::vector(count_size_vector);
+
+    global_vec[100] = 5000;
+    global_vec[101] = 1;
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testMPITaskSequential(taskDataSeq, op); + ASSERT_EQ(testMPITaskSequential.validation(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_pre_processing) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 500; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + + global_vec[100] = 5000; + global_vec[101] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + ASSERT_EQ(testMpiTaskParallel.pre_processing(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + ASSERT_EQ(testTaskSequential.pre_processing(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 150; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + for (size_t i 
= 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + global_vec[100] = 5000; + global_vec[101] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + ASSERT_EQ(testMpiTaskParallel.run(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + ASSERT_EQ(testTaskSequential.run(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_post_processing) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 500; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + global_vec[100] = 5000; + global_vec[101] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + ASSERT_EQ(testMpiTaskParallel.post_processing(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential 
testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + ASSERT_EQ(testTaskSequential.post_processing(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 200; + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[100] = 5000; + global_vec[101] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int test2 = reference_elems[0] - reference_elems[1]; + ASSERT_EQ(test, test2); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int32_t) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const int count_size_vector = 300; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 2 * i + 4; + } + global_vec[100] = 5000; + global_vec[101] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 
0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int32_t test2 = reference_elems[0] - reference_elems[1]; + ASSERT_EQ(test, test2); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int_with_random) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const int count_size_vector = 300; + global_vec = kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); + global_vec[100] = 5000; + global_vec[101] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int test2 = reference_elems[0] - reference_elems[1]; + ASSERT_EQ(test, test2); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if 
(ProcRank == 0) { + const int count_size_vector = 1000; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 0.25 * i + 10; + } + + global_vec[100] = 110.001f; + global_vec[101] = -990.0025f; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + float test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + float test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float_with_random) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 1000; + global_vec = kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); + global_vec[100] = 110.001f; + global_vec[101] = -990.0025f; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + float test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_double) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 750; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 0.25 * i + 10; + } + + global_vec[100] = 110.001; + global_vec[101] = -990.0025; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + double test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp new file mode 100644 index 00000000000..4445f74c33e --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp @@ -0,0 +1,279 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace enum_ops { +enum operations { MAX_DIFFERENCE }; +}; + +namespace kholin_k_vector_neighbor_diff_elems_mpi { + +template +std::vector 
get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + + if (std::is_integral::value) { + std::uniform_int_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else if (std::is_floating_point::value) { + std::uniform_real_distribution dist(0.0, 99.0); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else { + throw std::invalid_argument("TypeElem must be an integral or floating point type"); + } + + return vec; +} + +template +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_, enum_ops::operations ops_) + : Task(std::move(taskData_)), ops(ops_) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + double result; + TypeIndex left_index; + TypeIndex right_index; + TypeElem left_elem; + TypeElem right_elem; + enum_ops::operations ops; +}; + +template +bool TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + result = {}; + left_index = {}; + right_index = 2; + left_elem = {}; + right_elem = {}; + return true; +} + +template +bool TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; +} + +template +bool TestTaskSequential::run() { + internal_order_test(); + if (ops == enum_ops::MAX_DIFFERENCE) { + double max_delta = 0; + double delta = 0; + size_t curr_index = 0; + auto iter_curr = input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = input_.end() - 1; + auto iter_begin = input_.begin(); + while (iter_curr != iter_end) { + delta = abs(*iter_next - *iter_curr); + if (delta > max_delta) { + if (iter_begin == iter_curr) { + curr_index = 0; + max_delta = delta; + } else { + curr_index = std::distance(input_.begin(), iter_curr); + max_delta = delta; + } + } + iter_curr++; + iter_next = iter_curr + 1; + } + result = max_delta; + right_index = curr_index + 1; + left_index = curr_index; + left_elem = input_[left_index]; + right_elem = input_[right_index]; + } + return true; +} + +template +bool TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = left_elem; + reinterpret_cast(taskData->outputs[0])[1] = right_elem; + reinterpret_cast(taskData->outputs[1])[0] = left_index; + reinterpret_cast(taskData->outputs[1])[1] = right_index; + reinterpret_cast(taskData->outputs[2])[0] = result; + return true; +} + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_, enum_ops::operations ops_) + : Task(std::move(taskData_)), ops(ops_) {} + + MPI_Datatype get_mpi_type(); + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + ~TestMPITaskParallel() override { MPI_Type_free(&mpi_type_elem); } + + private: + std::vector input_; + std::vector local_input_; + unsigned int delta_n; + unsigned int delta_n_r; + double result; + unsigned int residue; + enum_ops::operations ops; + MPI_Datatype mpi_type_elem; + void print_local_data(); + double max_difference(); +}; + +template +MPI_Datatype TestMPITaskParallel::get_mpi_type() { + MPI_Type_contiguous(sizeof(TypeElem), 
MPI_BYTE, &mpi_type_elem); + MPI_Type_commit(&mpi_type_elem); + return mpi_type_elem; +} + +template +bool TestMPITaskParallel::pre_processing() { + internal_order_test(); + int ProcRank = 0; + int size = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + if (ProcRank == 0) { + delta_n = taskData->inputs_count[0] / size; + delta_n_r = {}; + } + MPI_Bcast(&delta_n, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); + if (ProcRank == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + } + if (ProcRank == 0) { + residue = taskData->inputs_count[0] % size; + delta_n_r = delta_n + residue; + local_input_ = std::vector(delta_n_r); + } else { + local_input_ = std::vector(delta_n); + } + MPI_Scatter(input_.data(), delta_n, mpi_type_elem, local_input_.data(), delta_n, mpi_type_elem, 0, MPI_COMM_WORLD); + if (ProcRank == 0) { + for (unsigned int i = delta_n; i < delta_n_r; i++) { + local_input_[i] = input_[i]; + } + } + result = {}; + residue = {}; + return true; +} + +template +bool TestMPITaskParallel::validation() { + internal_order_test(); + mpi_type_elem = get_mpi_type(); + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +template +bool TestMPITaskParallel::run() { + internal_order_test(); + double local_result = 0; + local_result = max_difference(); + if (ops == enum_ops::MAX_DIFFERENCE) { + double sendbuf1[1]; + sendbuf1[0] = local_result; + MPI_Reduce(sendbuf1, &result, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); + } + // finalisation + return true; +} + +template +bool TestMPITaskParallel::post_processing() { + internal_order_test(); + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} + +template +void TestMPITaskParallel::print_local_data() { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + std::cout << "I'm proc 0" << "and my local_input data is "; + for (unsigned int i = 0; i < delta_n_r; i++) { + std::cout << local_input_[i] << " "; + } + std::cout << std::endl; + } else { + std::cout << "I'm" << ProcRank << " proc " << "and my local_input data is "; + for (unsigned int i = 0; i < delta_n; i++) { + std::cout << local_input_[i] << " "; + } + std::cout << std::endl; + } +} + +template +double TestMPITaskParallel::max_difference() { + double max_delta = 0; + double delta = 0; + double local_result = 0; + auto iter_curr = local_input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = local_input_.end() - 1; + while (iter_curr != iter_end) { + delta = abs((double)(*iter_next - *iter_curr)); + if (delta > max_delta) { + max_delta = delta; + } + iter_curr++; + iter_next = iter_curr + 1; + local_result = max_delta; + } + return local_result; +} +} // namespace kholin_k_vector_neighbor_diff_elems_mpi \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp new file mode 100644 index 00000000000..15f2944ebe2 --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp @@ -0,0 +1,102 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" + 
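+// Note on the parallel version above (an illustrative reviewer's sketch using
+// the member names of TestMPITaskParallel; not part of the submitted task):
+// MPI_Scatter hands each rank a disjoint block of delta_n elements, so
+// max_difference() never compares a pair that straddles a block boundary, and
+// the extra copy on rank 0 takes input_[delta_n .. delta_n + residue), which
+// appears to duplicate rank 1's block rather than pick up the trailing
+// elements. The boundary pairs could be covered on rank 0 after the reduce:
+//
+//   int size = 0;
+//   MPI_Comm_size(MPI_COMM_WORLD, &size);
+//   for (int r = 1; r < size; ++r) {
+//     size_t b = static_cast<size_t>(r) * delta_n;  // first index scattered to rank r
+//     if (b < input_.size()) {
+//       result = std::max(result, std::fabs(static_cast<double>(input_[b] - input_[b - 1])));
+//     }
+//   }
+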
+TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_pipeline_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_elems(2, 0); + std::vector global_indices(2, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const float count_size_vector = 100000000; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[10] = 5000; + global_vec[11] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); + taskDataPar->outputs_count.emplace_back(global_elems.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskDataPar->outputs_count.emplace_back(global_indices.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar, op); + testMpiTaskParallel->validation(); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (ProcRank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_task_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_elems(2, 0); + std::vector global_indices(2, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const float count_size_vector = 100000000; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[10] = 5000; + global_vec[11] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); + taskDataPar->outputs_count.emplace_back(global_elems.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskDataPar->outputs_count.emplace_back(global_indices.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar, op); + testMpiTaskParallel->validation(); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (ProcRank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp new file mode 100644 index 00000000000..94c0fb83537 --- /dev/null +++ 
b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp new file mode 100644 index 00000000000..4d8e3c4930b --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp @@ -0,0 +1,291 @@ +#include + +#include + +#include "core/task/include/task.hpp" +#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_pre_processing) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + + EXPECT_EQ(testTaskSequential.pre_processing(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_validation) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + EXPECT_EQ(testTaskSequential.validation(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_run) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + EXPECT_EQ(testTaskSequential.run(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_post_processing) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + 
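+  // Phases run in the canonical order asserted by internal_order_test():
+  // validation() -> pre_processing() -> run() -> post_processing().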
testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + EXPECT_EQ(testTaskSequential.post_processing(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int32_t) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = 2 * i; + } + in[234] = 0; + in[235] = 4000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 0l); + EXPECT_EQ(out[1], 4000l); + EXPECT_EQ(out_index[0], 234ull); + EXPECT_EQ(out_index[1], 235ull); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int_with_random) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(1256); + in[234] = 0; + in[235] = 4000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 0l); + EXPECT_EQ(out[1], 4000l); + EXPECT_EQ(out_index[0], 234ull); + EXPECT_EQ(out_index[1], 235ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_double) { + std::vector in(25680, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + in[189] = -1000.1; + in[190] = 9000.9; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], -1000.1, 1e-6); + EXPECT_NEAR(out[1], 9000.9, 1e-6); + EXPECT_EQ(out_index[0], 189ull); + EXPECT_EQ(out_index[1], 190ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int8_t) { + std::vector in(250, -1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 2 == 0) { + in[i] = -50; + } else { + in[i] = 50; + } + } + in[5] = 56; + in[6] = 
-56; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 56); + EXPECT_EQ(out[1], -56); + EXPECT_EQ(out_index[0], 5ull); + EXPECT_EQ(out_index[1], 6ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int64_t) { + std::vector in(75836, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 3 == 0) { + in[i] = 10; + } + if (i % 3 == 1) { + in[i] = 30; + } + if (i % 3 == 2) { + in[i] = 70; + } + } + in[20] = -1000; + in[21] = 1119; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], -1000ll); + EXPECT_EQ(out[1], 1119ll); + EXPECT_EQ(out_index[0], 20ull); + EXPECT_EQ(out_index[1], 21ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float) { + std::vector in(20, 1.0f); + std::vector out(2, 0.0f); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] += (i + 1.0f) * 2.5f; + } + in[0] = 110.001f; + in[1] = -990.0025f; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], 110.001f, 1e-4); + EXPECT_NEAR(out[1], -990.0025f, 1e-4); + EXPECT_EQ(out_index[0], 0ull); + EXPECT_EQ(out_index[1], 1ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float_with_random) { + std::vector in(20, 1.0f); + std::vector out(2, 0.0f); + std::vector out_index(2, 0); + in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(20); + in[0] = 110.001f; + in[1] = -990.0025f; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + 
taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], 110.001f, 1e-4); + EXPECT_NEAR(out[1], -990.0025f, 1e-4); + EXPECT_EQ(out_index[0], 0ull); + EXPECT_EQ(out_index[1], 1ull); +} \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp new file mode 100644 index 00000000000..116058ae9ac --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +using namespace std::chrono_literals; + +namespace kholin_k_vector_neighbor_diff_elems_seq { + +template +std::vector get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + + if (std::is_integral::value) { + std::uniform_int_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else if (std::is_floating_point::value) { + std::uniform_real_distribution dist(0.0, 99.0); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else { + throw std::invalid_argument("TypeElem must be an integral or floating point type"); + } + + return vec; +} + +template +class MostDiffNeighborElements : public ppc::core::Task { + public: + explicit MostDiffNeighborElements(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + double result; + TypeIndex left_index; + TypeIndex right_index; + TypeElem left_elem; + TypeElem right_elem; +}; + +template +bool MostDiffNeighborElements::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + result = {}; + left_index = {}; + right_index = 2; + left_elem = {}; + right_elem = {}; + return true; +} + +template +bool MostDiffNeighborElements::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; +} + +template +bool MostDiffNeighborElements::run() { + internal_order_test(); + double max_delta = 0; + double delta = 0; + size_t curr_index = 0; + auto iter_curr = input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = input_.end() - 1; + auto iter_begin = input_.begin(); + while (iter_curr != iter_end) { + delta = fabs((double)(*iter_next - *iter_curr)); + if (delta > max_delta) { + if (iter_begin == iter_curr) { + curr_index = 0; + max_delta = delta; + } else { + curr_index = std::distance(input_.begin(), iter_curr); + max_delta = delta; + } + } + iter_curr++; + iter_next = iter_curr + 1; + } + result = max_delta; + right_index = curr_index + 1; + left_index = curr_index; + left_elem = input_[left_index]; + + right_elem = input_[right_index]; + return true; +} + +template +bool MostDiffNeighborElements::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = left_elem; + 
reinterpret_cast(taskData->outputs[0])[1] = right_elem; + reinterpret_cast(taskData->outputs[1])[0] = left_index; + reinterpret_cast(taskData->outputs[1])[1] = right_index; + return true; +} +} // namespace kholin_k_vector_neighbor_diff_elems_seq \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp new file mode 100644 index 00000000000..f3b70f43b2d --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp @@ -0,0 +1,80 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_seq, test_pipeline_run) { + const int count = 20000000; + + std::vector in(count, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTaskSequential = + std::make_shared>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, test_task_run) { + const int count = 250000000; + + std::vector in(count, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTaskSequential = + std::make_shared>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp new file mode 100644 index 00000000000..c67ad3aabd7 --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp 
@@ -0,0 +1 @@
+#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp"
\ No newline at end of file

From f2bdd7874b625042ad317f5eed90c300936c2cc5 Mon Sep 17 00:00:00 2001
From: NikKazzzzzz <127418135+NikKazzzzzz@users.noreply.github.com>
Date: Fri, 1 Nov 2024 03:40:36 +0300
Subject: [PATCH 036/155] Kazunin Nikita. Task 1. Variant 23. Counting the
 frequency of a character in a string. (#52)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Task description
The task is to count how many times a given character occurs in a string, using both a sequential and a parallel approach. The main goal is to demonstrate how different data-processing strategies affect performance and execution efficiency.

Sequential variant
The sequential implementation uses the C++ standard library to count the character in the string: the code walks over every character and tallies occurrences of the target. This method is simple to implement but can be inefficient for large strings, since all the work runs on a single thread.

Parallel variant (MPI)
The parallel variant uses MPI (Message Passing Interface) to spread the load across several processes. The string is split into segments that are processed in parallel, which substantially speeds up the count for large strings. Each process counts the occurrences in its own segment, after which the partial results are combined on the root process. This makes efficient use of the available compute resources and reduces the run time on large inputs.
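To make the scheme concrete, here is a minimal self-contained sketch of the approach described above (a sketch only: it assumes Boost.MPI is available, and every name in it is illustrative rather than taken from the submitted files; the real task classes appear in ops_mpi.hpp below). The sequential variant is essentially std::count over the whole string; the parallel variant scatters contiguous segments and sums the per-process counts with a reduce:

#include <boost/mpi.hpp>
#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::string text;
  char target = 'p';
  if (world.rank() == 0) {
    text.assign(1000, 'p');  // toy input; the real tests feed it in via TaskData
  }

  // Every rank needs the total length (and the target) to size its segment.
  int n = static_cast<int>(text.size());
  boost::mpi::broadcast(world, n, 0);
  boost::mpi::broadcast(world, target, 0);

  // Block distribution: the first n % size ranks take one extra element.
  std::vector<int> counts(world.size(), n / world.size());
  std::vector<int> displs(world.size(), 0);
  for (int i = 0; i < world.size(); ++i) {
    if (i < n % world.size()) ++counts[i];
    if (i > 0) displs[i] = displs[i - 1] + counts[i - 1];
  }

  std::vector<char> segment(counts[world.rank()]);
  boost::mpi::scatterv(world, text.data(), counts, displs, segment.data(),
                       counts[world.rank()], 0);

  // Each process counts inside its own segment...
  int local = static_cast<int>(std::count(segment.begin(), segment.end(), target));

  // ...and the partial counts are summed on the root.
  int total = 0;
  boost::mpi::reduce(world, local, total, std::plus<int>(), 0);

  if (world.rank() == 0) {
    std::cout << "occurrences: " << total << '\n';  // 1000 for the toy input
  }
  return 0;
}

Run it under mpirun with any process count; the scatterv/reduce pair is the same pattern the parallel task below uses.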
--- .../func_tests/main.cpp | 207 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 +++++ .../perf_tests/main.cpp | 93 ++++++++ .../src/ops_mpi.cpp | 99 +++++++++ .../func_tests/main.cpp | 138 ++++++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 84 +++++++ .../src/ops_seq.cpp | 37 ++++ 8 files changed, 731 insertions(+) create mode 100644 tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp create mode 100644 tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp create mode 100644 tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp create mode 100644 tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp create mode 100644 tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp create mode 100644 tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp create mode 100644 tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp create mode 100644 tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..9f05419e415 --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp @@ -0,0 +1,207 @@ +// Copyright 2023 Nesterov Alexander + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_large_random_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'x'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 1000000; + global_str.resize(count_size_str); + std::random_device rd; + std::mt19937 eng(rd()); + std::uniform_int_distribution<> distr(0, 25); + + std::generate(global_str.begin(), global_str.end(), [&]() { return 'a' + distr(eng); }); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_no_target_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'p'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 500; + global_str = std::vector(count_size_str, 'f'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_diff_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_all_char_is_same) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'p'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 325; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} // namespace kazunin_n_count_freq_a_char_in_string_mpi diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp new file mode 100644 index 00000000000..b17d31d374a --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander + +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kazunin_n_count_freq_a_char_in_string_mpi { +class CharFreqCounterMPISequential : public ppc::core::Task { + public: + explicit CharFreqCounterMPISequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + size_t count_result_{0}; + char character_to_count_{}; + std::vector input_string_; +}; + +class CharFreqCounterMPIParallel : public ppc::core::Task { + public: + 
explicit CharFreqCounterMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + size_t total_count_{0}; + size_t local_count_{0}; + char character_to_count_{}; + std::vector input_string_; + std::vector local_segment_; + boost::mpi::communicator global; +}; +} // namespace kazunin_n_count_freq_a_char_in_string_mpi \ No newline at end of file diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..81408bde99c --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + char target_char = 'p'; + + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_res = std::make_shared(); + auto perf_analyz = std::make_shared(testMpiTaskParallel); + perf_analyz->pipeline_run(perfAttr, perf_res); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_res); + ASSERT_EQ(count_size_str, global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + char target_char = 'p'; + + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; 
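+  // boost::mpi::timer measures wall-clock seconds from its construction, so the
+  // lambda below reports elapsed time to the perf harness.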
+ perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_res = std::make_shared(); + auto perf_analyz = std::make_shared(testMpiTaskParallel); + perf_analyz->task_run(perfAttr, perf_res); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_res); + ASSERT_EQ(count_size_str, global_count[0]); + } +} diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp new file mode 100644 index 00000000000..75af53da384 --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp @@ -0,0 +1,99 @@ +// Copyright 2024 Nesterov Alexander +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +#include +#include +#include + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::pre_processing() { + internal_order_test(); + input_string_.assign(reinterpret_cast(taskData->inputs[0]), + reinterpret_cast(taskData->inputs[0]) + taskData->inputs_count[0]); + character_to_count_ = *reinterpret_cast(taskData->inputs[1]); + count_result_ = 0; + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::run() { + internal_order_test(); + count_result_ = std::count(input_string_.begin(), input_string_.end(), character_to_count_); + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::post_processing() { + *reinterpret_cast(taskData->outputs[0]) = count_result_; + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::pre_processing() { + internal_order_test(); + if (global.rank() == 0) { + character_to_count_ = *reinterpret_cast(taskData->inputs[1]); + } + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::validation() { + internal_order_test(); + return global.rank() != 0 || taskData->outputs_count[0] == 1; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::run() { + internal_order_test(); + + int my_rank = global.rank(); + auto world_size = global.size(); + int n = 0; + + if (my_rank == 0) { + n = taskData->inputs_count[0]; + input_string_.assign(reinterpret_cast(taskData->inputs[0]), + reinterpret_cast(taskData->inputs[0]) + n); + } + + boost::mpi::broadcast(global, n, 0); + boost::mpi::broadcast(global, character_to_count_, 0); + + auto base_segment_size = n / world_size; + auto extra = n % world_size; + std::vector send_counts(world_size, base_segment_size); + std::vector displacements(world_size, 0); + + for (auto i = 0; i < world_size; ++i) { + if (i < extra) { + ++send_counts[i]; + } + if (i > 0) { + displacements[i] = displacements[i - 1] + send_counts[i - 1]; + } + } + + local_segment_.resize(send_counts[my_rank]); + if (my_rank == 0) { + boost::mpi::scatterv(global, input_string_.data(), send_counts, displacements, local_segment_.data(), + send_counts[my_rank], 0); + } else { + std::vector empty_buffer(0); + boost::mpi::scatterv(global, empty_buffer.data(), send_counts, displacements, local_segment_.data(), + send_counts[my_rank], 0); + } + + local_count_ = std::count(local_segment_.begin(), local_segment_.end(), character_to_count_); + + boost::mpi::reduce(global, local_count_, total_count_, std::plus<>(), 0); + return true; +} + +bool 
kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::post_processing() { + internal_order_test(); + + if (global.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = total_count_; + } + return true; +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..5676576392d --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp @@ -0,0 +1,138 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_numeric_characters) { + std::string test_string = "1122334455"; + + char target_character = '2'; + int expected_count = 2; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_empty_string) { + std::string test_string; + + char target_character = 'p'; + int expected_count = 0; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_mixed_characters) { + std::string test_string = "a1b2c3d4a5"; + + char target_character = 'a'; + int expected_count = 2; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + 
kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_absent_character_in_repeated_string) { + std::string test_string(500, 'x'); + + char target_character = 'y'; + int expected_count = 0; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_special_characters) { + std::string test_string = "@@##!!&&"; + + char target_character = '#'; + int expected_count = 2; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + + ASSERT_EQ(expected_count, output[0]); +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp new file mode 100644 index 00000000000..584ec2c3352 --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace kazunin_n_count_freq_a_char_in_string_seq { + +class CountFreqCharTaskSequential : public ppc::core::Task { + public: + explicit CountFreqCharTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + char target_character_{}; + int frequency_count_ = 0; + std::string input_string_; +}; +} // namespace kazunin_n_count_freq_a_char_in_string_seq diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..fd72abfffa3 --- /dev/null +++ 
b/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp @@ -0,0 +1,84 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_pipeline_run) { + std::string input_str(95000, 'o'); + char target_char = 'o'; + int expected_frequency = 95000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_task_run) { + std::string input_str(95000, 'o'); + char target_char = 'o'; + int expected_frequency = 95000; + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp new file mode 100644 index 00000000000..99ae27c5b39 --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp @@ -0,0 +1,37 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +#include +#include +#include + +namespace kazunin_n_count_freq_a_char_in_string_seq { +bool 
+bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::pre_processing() {
+  internal_order_test();
+  input_string_ = *reinterpret_cast<std::string*>(taskData->inputs[0]);
+  target_character_ = *reinterpret_cast<char*>(taskData->inputs[1]);
+  frequency_count_ = 0;
+  return true;
+}
+
+bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1;
+}
+
+bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::run() {
+  internal_order_test();
+  for (const auto& ch : input_string_) {
+    if (ch == target_character_) {
+      ++frequency_count_;
+    }
+  }
+  return true;
+}
+
+bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int*>(taskData->outputs[0]) = frequency_count_;
+  return true;
+}
+}  // namespace kazunin_n_count_freq_a_char_in_string_seq

From 1ccc45885d4ad5350d51593e6b65f625ed068f9c Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Fri, 1 Nov 2024 09:10:13 +0800
Subject: [PATCH 037/155] =?UTF-8?q?Revert=20"=D0=A5=D0=BE=D0=BB=D0=B8?=
 =?UTF-8?q?=D0=BD=20=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB.=20=D0=97=D0=B0?=
 =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?=
 =?UTF-8?q?=D0=BD=D1=82=208.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5?=
 =?UTF-8?q?=D0=B5=20=D0=BE=D1=82=D0=BB=D0=B8=D1=87=D0=B0=D1=8E=D1=89=D0=B8?=
 =?UTF-8?q?=D1=85=D1=81=D1=8F=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D1=8E=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD?=
 =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0?=
 =?UTF-8?q?."=20(#94)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#39

https://github.com/learning-process/ppc-2024-autumn/actions/runs/11622139585/job/32367125304

MSVC job has failed
---
 .../func_tests/main.cpp                       | 528 ------------------
 .../include/ops_mpi.hpp                       | 279 ---------
 .../perf_tests/main.cpp                       | 102 ----
 .../src/ops_mpi.cpp                           |   1 -
 .../func_tests/main.cpp                       | 291 ----------
 .../include/ops_seq.hpp                       | 121 ----
 .../perf_tests/main.cpp                       |  80 ---
 .../src/ops_seq.cpp                           |   1 -
 8 files changed, 1403 deletions(-)
 delete mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
 delete mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
 delete mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp
 delete mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp
 delete mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp

diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
deleted file mode 100644
index 6e1acea30dc..00000000000
--- a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp
+++ /dev/null
@@ -1,528 +0,0 @@
-
-#include <gtest/gtest.h>
-
-#include <mpi.h>
-#include <random>
-#include <vector>
-
-#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp"
-
-TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_validation) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - const int count_size_vector = 500; - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - std::vector global_vec; - std::vector global_delta(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - global_vec = std::vector(count_size_vector); - - global_vec[100] = 5000; - global_vec[101] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testMPITaskSequential(taskDataSeq, op); - ASSERT_EQ(testMPITaskSequential.validation(), true); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_pre_processing) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - const int count_size_vector = 500; - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - std::vector global_vec; - std::vector global_delta(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - global_vec = std::vector(count_size_vector); - - global_vec[100] = 5000; - global_vec[101] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - ASSERT_EQ(testMpiTaskParallel.pre_processing(), true); - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - 
kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - ASSERT_EQ(testTaskSequential.pre_processing(), true); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_run) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - const int count_size_vector = 150; - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - std::vector global_vec; - std::vector global_delta(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 4 * i + 2; - } - global_vec[100] = 5000; - global_vec[101] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - ASSERT_EQ(testMpiTaskParallel.run(), true); - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - ASSERT_EQ(testTaskSequential.run(), true); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_post_processing) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - const int count_size_vector = 500; - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - std::vector global_vec; - std::vector global_delta(1, 0); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 4 * i + 2; - } - global_vec[100] = 5000; - global_vec[101] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - ASSERT_EQ(testMpiTaskParallel.post_processing(), true); - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - ASSERT_EQ(testTaskSequential.post_processing(), true); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - const int count_size_vector = 200; - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 4 * i + 2; - } - - global_vec[100] = 5000; - global_vec[101] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - double test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - int test2 = reference_elems[0] - reference_elems[1]; - ASSERT_EQ(test, test2); - } -} -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int32_t) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - if (ProcRank == 0) { - const int count_size_vector = 300; - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 2 * i 
+ 4; - } - global_vec[100] = 5000; - global_vec[101] = 1; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - double test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - int32_t test2 = reference_elems[0] - reference_elems[1]; - ASSERT_EQ(test, test2); - } -} -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int_with_random) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - if (ProcRank == 0) { - const int count_size_vector = 300; - global_vec = kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); - global_vec[100] = 5000; - global_vec[101] = 1; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - double test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - 
taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - int test2 = reference_elems[0] - reference_elems[1]; - ASSERT_EQ(test, test2); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - const int count_size_vector = 1000; - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 0.25 * i + 10; - } - - global_vec[100] = 110.001f; - global_vec[101] = -990.0025f; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - float test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - float test2 = reference_elems[0] - reference_elems[1]; - ASSERT_NEAR(test, test2, 1e-5); - } -} -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float_with_random) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - const int count_size_vector = 1000; - global_vec = kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); - global_vec[100] = 110.001f; - global_vec[101] = -990.0025f; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - 
testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - double test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - float test2 = reference_elems[0] - reference_elems[1]; - ASSERT_NEAR(test, test2, 1e-5); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_double) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_delta(1, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - - if (ProcRank == 0) { - const int count_size_vector = 750; - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 0.25 * i + 10; - } - - global_vec[100] = 110.001; - global_vec[101] = -990.0025; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); - taskDataPar->outputs_count.emplace_back(global_delta.size()); - } - - kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); - testMpiTaskParallel.validation(); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - double test = global_delta[0]; - - if (ProcRank == 0) { - std::vector reference_delta(1, 0); - std::vector reference_elems(2, 0); - std::vector reference_indices(2, 0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); - taskDataSeq->outputs_count.emplace_back(reference_elems.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); - taskDataSeq->outputs_count.emplace_back(reference_indices.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); - taskDataSeq->outputs_count.emplace_back(reference_delta.size()); - - kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - double test2 = reference_elems[0] - reference_elems[1]; - ASSERT_NEAR(test, test2, 1e-5); - } -} \ No newline at end of file diff --git 
a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp deleted file mode 100644 index 4445f74c33e..00000000000 --- a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp +++ /dev/null @@ -1,279 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace enum_ops { -enum operations { MAX_DIFFERENCE }; -}; - -namespace kholin_k_vector_neighbor_diff_elems_mpi { - -template -std::vector get_random_vector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - - if (std::is_integral::value) { - std::uniform_int_distribution dist(0, 99); - for (int i = 0; i < sz; i++) { - vec[i] = dist(gen); - } - } else if (std::is_floating_point::value) { - std::uniform_real_distribution dist(0.0, 99.0); - for (int i = 0; i < sz; i++) { - vec[i] = dist(gen); - } - } else { - throw std::invalid_argument("TypeElem must be an integral or floating point type"); - } - - return vec; -} - -template -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_, enum_ops::operations ops_) - : Task(std::move(taskData_)), ops(ops_) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - double result; - TypeIndex left_index; - TypeIndex right_index; - TypeElem left_elem; - TypeElem right_elem; - enum_ops::operations ops; -}; - -template -bool TestTaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); - result = {}; - left_index = {}; - right_index = 2; - left_elem = {}; - right_elem = {}; - return true; -} - -template -bool TestTaskSequential::validation() { - internal_order_test(); - return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; -} - -template -bool TestTaskSequential::run() { - internal_order_test(); - if (ops == enum_ops::MAX_DIFFERENCE) { - double max_delta = 0; - double delta = 0; - size_t curr_index = 0; - auto iter_curr = input_.begin(); - auto iter_next = iter_curr + 1; - auto iter_end = input_.end() - 1; - auto iter_begin = input_.begin(); - while (iter_curr != iter_end) { - delta = abs(*iter_next - *iter_curr); - if (delta > max_delta) { - if (iter_begin == iter_curr) { - curr_index = 0; - max_delta = delta; - } else { - curr_index = std::distance(input_.begin(), iter_curr); - max_delta = delta; - } - } - iter_curr++; - iter_next = iter_curr + 1; - } - result = max_delta; - right_index = curr_index + 1; - left_index = curr_index; - left_elem = input_[left_index]; - right_elem = input_[right_index]; - } - return true; -} - -template -bool TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = left_elem; - reinterpret_cast(taskData->outputs[0])[1] = right_elem; - reinterpret_cast(taskData->outputs[1])[0] = left_index; - reinterpret_cast(taskData->outputs[1])[1] = right_index; - reinterpret_cast(taskData->outputs[2])[0] = result; - return true; -} - -template -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_, enum_ops::operations ops_) - : 
Task(std::move(taskData_)), ops(ops_) {} - - MPI_Datatype get_mpi_type(); - - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - ~TestMPITaskParallel() override { MPI_Type_free(&mpi_type_elem); } - - private: - std::vector input_; - std::vector local_input_; - unsigned int delta_n; - unsigned int delta_n_r; - double result; - unsigned int residue; - enum_ops::operations ops; - MPI_Datatype mpi_type_elem; - void print_local_data(); - double max_difference(); -}; - -template -MPI_Datatype TestMPITaskParallel::get_mpi_type() { - MPI_Type_contiguous(sizeof(TypeElem), MPI_BYTE, &mpi_type_elem); - MPI_Type_commit(&mpi_type_elem); - return mpi_type_elem; -} - -template -bool TestMPITaskParallel::pre_processing() { - internal_order_test(); - int ProcRank = 0; - int size = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - MPI_Comm_size(MPI_COMM_WORLD, &size); - if (ProcRank == 0) { - delta_n = taskData->inputs_count[0] / size; - delta_n_r = {}; - } - MPI_Bcast(&delta_n, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); - if (ProcRank == 0) { - input_ = std::vector(taskData->inputs_count[0]); - auto ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); - } - if (ProcRank == 0) { - residue = taskData->inputs_count[0] % size; - delta_n_r = delta_n + residue; - local_input_ = std::vector(delta_n_r); - } else { - local_input_ = std::vector(delta_n); - } - MPI_Scatter(input_.data(), delta_n, mpi_type_elem, local_input_.data(), delta_n, mpi_type_elem, 0, MPI_COMM_WORLD); - if (ProcRank == 0) { - for (unsigned int i = delta_n; i < delta_n_r; i++) { - local_input_[i] = input_[i]; - } - } - result = {}; - residue = {}; - return true; -} - -template -bool TestMPITaskParallel::validation() { - internal_order_test(); - mpi_type_elem = get_mpi_type(); - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - if (ProcRank == 0) { - return taskData->outputs_count[0] == 1; - } - return true; -} - -template -bool TestMPITaskParallel::run() { - internal_order_test(); - double local_result = 0; - local_result = max_difference(); - if (ops == enum_ops::MAX_DIFFERENCE) { - double sendbuf1[1]; - sendbuf1[0] = local_result; - MPI_Reduce(sendbuf1, &result, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); - } - // finalisation - return true; -} - -template -bool TestMPITaskParallel::post_processing() { - internal_order_test(); - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - if (ProcRank == 0) { - reinterpret_cast(taskData->outputs[0])[0] = result; - } - return true; -} - -template -void TestMPITaskParallel::print_local_data() { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - if (ProcRank == 0) { - std::cout << "I'm proc 0" << "and my local_input data is "; - for (unsigned int i = 0; i < delta_n_r; i++) { - std::cout << local_input_[i] << " "; - } - std::cout << std::endl; - } else { - std::cout << "I'm" << ProcRank << " proc " << "and my local_input data is "; - for (unsigned int i = 0; i < delta_n; i++) { - std::cout << local_input_[i] << " "; - } - std::cout << std::endl; - } -} - -template -double TestMPITaskParallel::max_difference() { - double max_delta = 0; - double delta = 0; - double local_result = 0; - auto iter_curr = local_input_.begin(); - auto iter_next = iter_curr + 1; - auto iter_end = local_input_.end() - 1; - while (iter_curr != iter_end) { - delta = abs((double)(*iter_next - *iter_curr)); - if (delta > max_delta) { - max_delta = delta; - } - iter_curr++; 
- iter_next = iter_curr + 1; - local_result = max_delta; - } - return local_result; -} -} // namespace kholin_k_vector_neighbor_diff_elems_mpi \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp deleted file mode 100644 index 15f2944ebe2..00000000000 --- a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp +++ /dev/null @@ -1,102 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_pipeline_run) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_elems(2, 0); - std::vector global_indices(2, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - std::shared_ptr taskDataPar = std::make_shared(); - if (ProcRank == 0) { - const float count_size_vector = 100000000; - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 4 * i + 2; - } - - global_vec[10] = 5000; - global_vec[11] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); - taskDataPar->outputs_count.emplace_back(global_elems.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); - taskDataPar->outputs_count.emplace_back(global_indices.size()); - } - - auto testMpiTaskParallel = - std::make_shared>(taskDataPar, op); - testMpiTaskParallel->validation(); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (ProcRank == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - } -} - -TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_task_run) { - int ProcRank = 0; - MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); - std::vector global_vec; - std::vector global_elems(2, 0); - std::vector global_indices(2, 0); - enum_ops::operations op = enum_ops::MAX_DIFFERENCE; - - std::shared_ptr taskDataPar = std::make_shared(); - if (ProcRank == 0) { - const float count_size_vector = 100000000; - global_vec = std::vector(count_size_vector); - for (size_t i = 0; i < global_vec.size(); i++) { - global_vec[i] = 4 * i + 2; - } - - global_vec[10] = 5000; - global_vec[11] = 1; - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); - taskDataPar->outputs_count.emplace_back(global_elems.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); - taskDataPar->outputs_count.emplace_back(global_indices.size()); - } - - auto testMpiTaskParallel = - std::make_shared>(taskDataPar, op); - testMpiTaskParallel->validation(); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - auto perfAttr = std::make_shared(); - 
perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (ProcRank == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - } -} diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp deleted file mode 100644 index 94c0fb83537..00000000000 --- a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp deleted file mode 100644 index 4d8e3c4930b..00000000000 --- a/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp +++ /dev/null @@ -1,291 +0,0 @@ -#include - -#include - -#include "core/task/include/task.hpp" -#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_pre_processing) { - std::vector in(1256, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - - EXPECT_EQ(testTaskSequential.pre_processing(), true); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_validation) { - std::vector in(1256, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - EXPECT_EQ(testTaskSequential.validation(), true); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_run) { - std::vector in(1256, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - EXPECT_EQ(testTaskSequential.run(), true); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_post_processing) { - std::vector in(1256, 
1); - std::vector out(2, 0); - std::vector out_index(2, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - EXPECT_EQ(testTaskSequential.post_processing(), true); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int32_t) { - std::vector in(1256, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - in[i] = 2 * i; - } - in[234] = 0; - in[235] = 4000; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_EQ(out[0], 0l); - EXPECT_EQ(out[1], 4000l); - EXPECT_EQ(out_index[0], 234ull); - EXPECT_EQ(out_index[1], 235ull); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int_with_random) { - std::vector in(1256, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(1256); - in[234] = 0; - in[235] = 4000; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_EQ(out[0], 0l); - EXPECT_EQ(out[1], 4000l); - EXPECT_EQ(out_index[0], 234ull); - EXPECT_EQ(out_index[1], 235ull); -} - -TEST(kholin_k_vector_neighbour_diff_elems_seq, check_double) { - std::vector in(25680, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - in[i] = i; - } - in[189] = -1000.1; - in[190] = 9000.9; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - 
testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_NEAR(out[0], -1000.1, 1e-6); - EXPECT_NEAR(out[1], 9000.9, 1e-6); - EXPECT_EQ(out_index[0], 189ull); - EXPECT_EQ(out_index[1], 190ull); -} - -TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int8_t) { - std::vector in(250, -1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - if (i % 2 == 0) { - in[i] = -50; - } else { - in[i] = 50; - } - } - in[5] = 56; - in[6] = -56; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_EQ(out[0], 56); - EXPECT_EQ(out[1], -56); - EXPECT_EQ(out_index[0], 5ull); - EXPECT_EQ(out_index[1], 6ull); -} - -TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int64_t) { - std::vector in(75836, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - if (i % 3 == 0) { - in[i] = 10; - } - if (i % 3 == 1) { - in[i] = 30; - } - if (i % 3 == 2) { - in[i] = 70; - } - } - in[20] = -1000; - in[21] = 1119; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_EQ(out[0], -1000ll); - EXPECT_EQ(out[1], 1119ll); - EXPECT_EQ(out_index[0], 20ull); - EXPECT_EQ(out_index[1], 21ull); -} - -TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float) { - std::vector in(20, 1.0f); - std::vector out(2, 0.0f); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - in[i] += (i + 1.0f) * 2.5f; - } - in[0] = 110.001f; - in[1] = -990.0025f; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_NEAR(out[0], 110.001f, 1e-4); - EXPECT_NEAR(out[1], -990.0025f, 1e-4); - EXPECT_EQ(out_index[0], 0ull); - EXPECT_EQ(out_index[1], 1ull); -} - 
-TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float_with_random) { - std::vector in(20, 1.0f); - std::vector out(2, 0.0f); - std::vector out_index(2, 0); - in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(20); - in[0] = 110.001f; - in[1] = -990.0025f; - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - kholin_k_vector_neighbor_diff_elems_seq::MostDiffNeighborElements testTaskSequential(taskData); - testTaskSequential.validation(); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - EXPECT_NEAR(out[0], 110.001f, 1e-4); - EXPECT_NEAR(out[1], -990.0025f, 1e-4); - EXPECT_EQ(out_index[0], 0ull); - EXPECT_EQ(out_index[1], 1ull); -} \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp deleted file mode 100644 index 116058ae9ac..00000000000 --- a/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp +++ /dev/null @@ -1,121 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -using namespace std::chrono_literals; - -namespace kholin_k_vector_neighbor_diff_elems_seq { - -template -std::vector get_random_vector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - - if (std::is_integral::value) { - std::uniform_int_distribution dist(0, 99); - for (int i = 0; i < sz; i++) { - vec[i] = dist(gen); - } - } else if (std::is_floating_point::value) { - std::uniform_real_distribution dist(0.0, 99.0); - for (int i = 0; i < sz; i++) { - vec[i] = dist(gen); - } - } else { - throw std::invalid_argument("TypeElem must be an integral or floating point type"); - } - - return vec; -} - -template -class MostDiffNeighborElements : public ppc::core::Task { - public: - explicit MostDiffNeighborElements(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - double result; - TypeIndex left_index; - TypeIndex right_index; - TypeElem left_elem; - TypeElem right_elem; -}; - -template -bool MostDiffNeighborElements::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); - result = {}; - left_index = {}; - right_index = 2; - left_elem = {}; - right_elem = {}; - return true; -} - -template -bool MostDiffNeighborElements::validation() { - internal_order_test(); - return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; -} - -template -bool MostDiffNeighborElements::run() { - internal_order_test(); - double max_delta = 0; - double delta = 0; - size_t curr_index = 0; - auto iter_curr = input_.begin(); - auto iter_next = iter_curr + 1; - auto iter_end = input_.end() - 1; - auto iter_begin = input_.begin(); - while (iter_curr != iter_end) { - delta = fabs((double)(*iter_next - *iter_curr)); - if (delta > 
max_delta) { - if (iter_begin == iter_curr) { - curr_index = 0; - max_delta = delta; - } else { - curr_index = std::distance(input_.begin(), iter_curr); - max_delta = delta; - } - } - iter_curr++; - iter_next = iter_curr + 1; - } - result = max_delta; - right_index = curr_index + 1; - left_index = curr_index; - left_elem = input_[left_index]; - - right_elem = input_[right_index]; - return true; -} - -template -bool MostDiffNeighborElements::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = left_elem; - reinterpret_cast(taskData->outputs[0])[1] = right_elem; - reinterpret_cast(taskData->outputs[1])[0] = left_index; - reinterpret_cast(taskData->outputs[1])[1] = right_index; - return true; -} -} // namespace kholin_k_vector_neighbor_diff_elems_seq \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp deleted file mode 100644 index f3b70f43b2d..00000000000 --- a/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" - -TEST(kholin_k_vector_neighbor_diff_elems_seq, test_pipeline_run) { - const int count = 20000000; - - std::vector in(count, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - in[i] = i; - } - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - auto testTaskSequential = - std::make_shared>(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); -} - -TEST(kholin_k_vector_neighbor_diff_elems_seq, test_task_run) { - const int count = 250000000; - - std::vector in(count, 1); - std::vector out(2, 0); - std::vector out_index(2, 0); - for (size_t i = 0; i < in.size(); i++) { - in[i] = i; - } - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(in.data())); - taskData->inputs_count.emplace_back(in.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); - taskData->outputs_count.emplace_back(out_index.size()); - - auto testTaskSequential = - std::make_shared>(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = 
std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
-    return static_cast<double>(duration) * 1e-9;
-  };
-
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-  ppc::core::Perf::print_perf_statistic(perfResults);
-}
diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp
deleted file mode 100644
index c67ad3aabd7..00000000000
--- a/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp"
\ No newline at end of file

From 9d6a724d94d2039555ee53f1f8f06f5c7250aa37 Mon Sep 17 00:00:00 2001
From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com>
Date: Fri, 1 Nov 2024 18:45:22 +0300
Subject: [PATCH 038/155] =?UTF-8?q?=D0=A2=D0=B8=D1=82=D0=BE=D0=B2=20=D0=A1?=
 =?UTF-8?q?=D0=B5=D0=BC=D1=91=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87?=
 =?UTF-8?q?=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=201.?=
 =?UTF-8?q?=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D=D0=BB=D0=B5=D0=BC?=
 =?UTF-8?q?=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE?=
 =?UTF-8?q?=D1=80=D0=B0.=20(#61)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Description of the sequential task:
Elements are summed with std::accumulate (an iteration over the vector that accumulates the sum).

* Description of the parallel task:
The vector is split into equal parts across the processes, which compute their partial sums concurrently. Once the local computations finish, each process returns its result to the root process, where the partial sums are combined with a reduce operation. Within each process the summation is likewise done with std::accumulate. A condensed sketch of this scheme is shown below.
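For illustration, here is a minimal standalone sketch of that scheme, assuming Boost.MPI (which the task itself uses). It is not the submitted task code: the chunk-splitting helper, the fixed input size, and names such as chunks, local_sum, and global_sum are invented for this example.

// Minimal sketch: root scatters near-equal chunks, every rank accumulates
// its chunk with std::accumulate, and reduce combines the partial sums.
// Illustrative only; not taken from the task sources.
#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>

#include <cstddef>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // The root process owns the full input vector.
  std::vector<int> data;
  if (world.rank() == 0) data.assign(100000, 1);

  // Root splits the vector into near-equal parts, one per process.
  std::vector<std::vector<int>> chunks(world.size());
  if (world.rank() == 0) {
    std::size_t base = data.size() / world.size();
    std::size_t rem = data.size() % world.size();
    auto it = data.begin();
    for (int p = 0; p < world.size(); ++p) {
      std::size_t len = base + (static_cast<std::size_t>(p) < rem ? 1 : 0);
      chunks[p].assign(it, it + len);
      it += len;
    }
  }

  // Each process receives its part and computes a local partial sum.
  std::vector<int> chunk;
  boost::mpi::scatter(world, chunks, chunk, 0);
  int local_sum = std::accumulate(chunk.begin(), chunk.end(), 0);

  // reduce gathers and adds the partial sums on the root process.
  int global_sum = 0;
  boost::mpi::reduce(world, local_sum, global_sum, std::plus<int>(), 0);

  if (world.rank() == 0) std::cout << "sum = " << global_sum << '\n';
  return 0;
}

With std::plus<int>, Boost.MPI should map the reduction to the built-in MPI sum operation, which matches the reduce step described above.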
--- .../titov_s_vector_sum/func_tests/main.cpp | 241 ++++++++++++++++++ .../titov_s_vector_sum/include/ops_mpi.hpp | 49 ++++ .../titov_s_vector_sum/perf_tests/main.cpp | 88 +++++++ tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp | 116 +++++++++ .../titov_s_vector_sum/func_tests/main.cpp | 135 ++++++++++ .../titov_s_vector_sum/include/ops_seq.hpp | 26 ++ .../titov_s_vector_sum/perf_tests/main.cpp | 81 ++++++ tasks/seq/titov_s_vector_sum/src/ops_seq.cpp | 45 ++++ 8 files changed, 781 insertions(+) create mode 100644 tasks/mpi/titov_s_vector_sum/func_tests/main.cpp create mode 100644 tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp create mode 100644 tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp create mode 100644 tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp create mode 100644 tasks/seq/titov_s_vector_sum/func_tests/main.cpp create mode 100644 tasks/seq/titov_s_vector_sum/include/ops_seq.hpp create mode 100644 tasks/seq/titov_s_vector_sum/perf_tests/main.cpp create mode 100644 tasks/seq/titov_s_vector_sum/src/ops_seq.cpp diff --git a/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp new file mode 100644 index 00000000000..c4fe34b01ba --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp @@ -0,0 +1,241 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +TEST(titov_s_vector_sum_mpi, Test_Sum_100) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_EmptyArray) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + 
taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_sum[0], 0); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_1000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_100000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + 
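+  // run() recomputes the sum sequentially, giving a reference value for the MPI result.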
MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_0) { + boost::mpi::communicator world; + std::vector global_vec(1, 0); + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp new file mode 100644 index 00000000000..3319bd016ec --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + 
+#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_mpi { + +std::vector getRandomVector(int sz); + +class MPIVectorSumSequential : public ppc::core::Task { + public: + explicit MPIVectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops; +}; + +class MPIVectorSumParallel : public ppc::core::Task { + public: + explicit MPIVectorSumParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace titov_s_vector_sum_mpi diff --git a/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..c565240f793 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,88 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +TEST(titov_s_vector_sum_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + 
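+  // One full warm-up execution; the perf analyzer below repeats the task num_running times.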
MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp new file mode 100644 index 00000000000..5f5b744c068 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp @@ -0,0 +1,116 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector titov_s_vector_sum_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::run() { + internal_order_test(); + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + unsigned int remainder = 0; + + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % world.size(); + } + + broadcast(world, delta, 0); + broadcast(world, remainder, 0); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + + for (int proc = 1; proc < world.size(); proc++) { + unsigned int send_size = (proc == world.size() - 1) ? delta + remainder : delta; + world.send(proc, 0, input_.data() + proc * delta, send_size); + } + } + local_input_ = std::vector((world.rank() == world.size() - 1) ? delta + remainder : delta); + + if (world.rank() != 0) { + unsigned int recv_size = (world.rank() == world.size() - 1) ? 
delta + remainder : delta; + world.recv(0, 0, local_input_.data(), recv_size); + } else { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } + + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::run() { + internal_order_test(); + int local_res; + local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); + reduce(world, local_res, res, std::plus(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/titov_s_vector_sum/func_tests/main.cpp b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp new file mode 100644 index 00000000000..3c60cb5d6f5 --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp @@ -0,0 +1,135 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, Test_Int) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} + +TEST(titov_s_vector_sum_seq, Test_Double) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], expected_sum, 1e-6); +} + +TEST(titov_s_vector_sum_seq, Test_Float) { + // Create data + std::vector in(1, 1.f); + std::vector out(1, 0.f); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], static_cast(in.size()), 1e-3f); +} + +TEST(titov_s_vector_sum_seq, Test_Int64_t) { + // Create 
data + std::vector in(75836, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Uint8_t) { + // Create data + std::vector in(255, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Empty_Array) { + // Create data + std::vector in(1, 0); + const int expected_sum = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} diff --git a/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp new file mode 100644 index 00000000000..d29d94269bd --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_seq { +template +class VectorSumSequential : public ppc::core::Task { + public: + explicit VectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + InOutType res; +}; + +} // namespace titov_s_vector_sum_seq diff --git a/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..3b772d11c2b --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,81 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // 
Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto vectorSumSequential = std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(titov_s_vector_sum_seq, test_task_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto vectorSumSequential = std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp new file mode 100644 index 00000000000..98ca0cac0b2 --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp @@ -0,0 +1,45 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +template +bool titov_s_vector_sum_seq::VectorSumSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +template +bool titov_s_vector_sum_seq::VectorSumSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +template +bool titov_s_vector_sum_seq::VectorSumSequential::run() { + internal_order_test(); + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +template +bool titov_s_vector_sum_seq::VectorSumSequential::post_processing() { + internal_order_test(); + 
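+  // Copy the accumulated sum into the caller-visible output buffer.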
reinterpret_cast<InOutType*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+template class titov_s_vector_sum_seq::VectorSumSequential<int>;
+template class titov_s_vector_sum_seq::VectorSumSequential<double>;
+template class titov_s_vector_sum_seq::VectorSumSequential<float>;
+template class titov_s_vector_sum_seq::VectorSumSequential<int64_t>;
+template class titov_s_vector_sum_seq::VectorSumSequential<uint8_t>;

From df914fd0937c93e2c148b60be997a019f63fb252 Mon Sep 17 00:00:00 2001
From: Nesterov Alexander
Date: Fri, 1 Nov 2024 17:07:10 +0100
Subject: [PATCH 039/155] Add separate running for debug tests (#106)

---
 scripts/run_perf_collector.sh | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/scripts/run_perf_collector.sh b/scripts/run_perf_collector.sh
index 124620fafe8..040f4eb0a52 100755
--- a/scripts/run_perf_collector.sh
+++ b/scripts/run_perf_collector.sh
@@ -1,12 +1,16 @@
 #!/bin/bash
-
-if [[ -z "$ASAN_RUN" ]]; then
-  if [[ $OSTYPE == "linux-gnu" ]]; then
-    mpirun --oversubscribe -np 4 ./build/bin/mpi_perf_tests
-  elif [[ $OSTYPE == "darwin"* ]]; then
-    mpirun -np 2 ./build/bin/mpi_perf_tests
+# separate tests for debug
+for test_item in $(./build/bin/mpi_perf_tests --gtest_list_tests | awk '/\./{ SUITE=$1 } / / { print SUITE $1 }')
+do
+  if [[ -z "$ASAN_RUN" ]]; then
+    if [[ $OSTYPE == "linux-gnu" ]]; then
+      mpirun --oversubscribe -np 4 ./build/bin/mpi_perf_tests --gtest_filter="$test_item"
+    elif [[ $OSTYPE == "darwin"* ]]; then
+      mpirun -np 2 ./build/bin/mpi_perf_tests --gtest_filter="$test_item"
+    fi
   fi
-fi
+done
+
 ./build/bin/omp_perf_tests
 ./build/bin/seq_perf_tests
 ./build/bin/stl_perf_tests

From 79015cb90bb11b4a2c4587a9e59f83fae540736d Mon Sep 17 00:00:00 2001
From: Alexey Chistov <112825972+Alexey2013@users.noreply.github.com>
Date: Fri, 1 Nov 2024 19:08:14 +0300
Subject: [PATCH 040/155] Fix tests with number of processes: 3, 5, 7, 8 (#78)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixed a bug with the number of processes in pre_processing. The program now
runs correctly on the specified number of processes.

---
 .../src/ops_mpi.cpp | 25 +++++++++++++------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp
index b0341e12788..f5b9d6cfbc4 100644
--- a/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp
+++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp
@@ -38,14 +38,19 @@ template <typename T>
 bool TestMPITaskParallel<T>::pre_processing() {
   internal_order_test();
-  int delta = 0;
+  int delta1 = 0;
+  int delta2 = 0;
+
   if (world.rank() == 0) {
     n = static_cast<int>(taskData->inputs_count[1]);
     m = static_cast<int>(taskData->inputs_count[2]);
-    delta = (n * m) / world.size();
+    int total_elements = n * m;
+    delta1 = total_elements / world.size();
+    delta2 = total_elements % world.size();
   }

-  boost::mpi::broadcast(world, delta, 0);
+  boost::mpi::broadcast(world, delta1, 0);
+  boost::mpi::broadcast(world, delta2, 0);

   if (world.rank() == 0) {
     input_ = std::vector<T>(n * m);
     auto* tmp_ptr = reinterpret_cast<T*>(taskData->inputs[0]);
     for (int i = 0; i < static_cast<int>(taskData->inputs_count[0]); i++) {
       input_[i] = tmp_ptr[i];
     }
+
+    int start_index = delta1 + (delta2 > 0 ? 1 : 0);
     for (int proc = 1; proc < world.size(); proc++) {
-      world.send(proc, 0, input_.data() + proc * delta, delta);
+      int current_delta = delta1 + (proc < delta2 ? 1 : 0);
+      world.send(proc, 0, input_.data() + start_index, current_delta);
+      start_index += current_delta;
     }
   }

-  local_input_ = std::vector<T>(delta);
+  int local_size = delta1 + (world.rank() < delta2 ? 1 : 0);
+  local_input_ = std::vector<T>(local_size);
+
   if (world.rank() == 0) {
-    local_input_ = std::vector<T>(input_.begin(), input_.begin() + delta);
+    local_input_ = std::vector<T>(input_.begin(), input_.begin() + local_size);
   } else {
-    world.recv(0, 0, local_input_.data(), delta);
+    world.recv(0, 0, local_input_.data(), local_size);
   }

   return true;

From 0d1c72db23a07b55e9acf5ec41b22580ecbe535c Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Sat, 2 Nov 2024 00:10:48 +0800
Subject: [PATCH 041/155] [CI] Add trigger on 'workflow_dispatch' for Github
 Actions (#107)

---
 .github/workflows/main.yml           | 1 +
 .github/workflows/perf-statistic.yml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 236343a12dc..b4889117e87 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -5,6 +5,7 @@ on:
   pull_request:
   schedule:
     - cron: '0 12 * * *'
+  workflow_dispatch:

 jobs:
   ubuntu-gcc-build:
diff --git a/.github/workflows/perf-statistic.yml b/.github/workflows/perf-statistic.yml
index 86b37373bbc..bd26d5cf7d3 100644
--- a/.github/workflows/perf-statistic.yml
+++ b/.github/workflows/perf-statistic.yml
@@ -3,6 +3,7 @@ name: Collect performance statistic
 on:
   schedule:
     - cron: '0 12 * * *'
+  workflow_dispatch:

 jobs:
   ubuntu-gcc-build:

From a8e27b36ec5cf2f5777e982263c40cd05b0b4099 Mon Sep 17 00:00:00 2001
From: 0xG00SE <61384845+DSolo03@users.noreply.github.com>
Date: Sat, 2 Nov 2024 03:18:06 +0300
Subject: [PATCH 042/155] Solovyev Danila. Task 1. Variant 3. Maximum value
 of vector elements. (#17)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential task description: for each element of the vector, its value is
compared with a variable that stores the maximum seen so far; if the element
is larger, its value is written into that variable.

MPI task description: the input data is divided into as many fragments as
there are available processes. Every fragment except the first is handed to
its corresponding process; the first fragment is processed by the same
process that split the data. Each process finds the maximum of its fragment
exactly as in the sequential task, and the maxima of all processes are then
combined with the `reduce` function and the `maximum` operator. The resulting
maximum is written into the `result` variable and returned.
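The reduction step described above collapses to a single collective call; a
minimal sketch assuming Boost.MPI (reduce_max is an illustrative name, and
each rank's fragment is assumed non-empty):

#include <boost/mpi.hpp>
#include <algorithm>
#include <vector>

// Illustrative sketch, not repository code: combine per-process maxima
// into one global maximum delivered to rank 0.
int reduce_max(const boost::mpi::communicator& world, const std::vector<int>& fragment) {
  // Each rank finds the maximum of its own fragment (precondition: non-empty)...
  int local_max = *std::max_element(fragment.begin(), fragment.end());
  // ...and rank 0 receives the maximum over all fragments.
  int global_max = 0;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
  return global_max;  // valid on rank 0 only
}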
--- .../solovyev_d_vector_max/func_tests/main.cpp | 107 ++++++++++++ .../solovyev_d_vector_max/include/header.hpp | 48 ++++++ .../solovyev_d_vector_max/perf_tests/main.cpp | 98 +++++++++++ .../mpi/solovyev_d_vector_max/src/source.cpp | 119 ++++++++++++++ .../solovyev_d_vector_max/func_tests/main.cpp | 153 ++++++++++++++++++ .../solovyev_d_vector_max/include/header.hpp | 25 +++ .../solovyev_d_vector_max/perf_tests/main.cpp | 92 +++++++++++ .../seq/solovyev_d_vector_max/src/source.cpp | 47 ++++++ 8 files changed, 689 insertions(+) create mode 100644 tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp create mode 100644 tasks/mpi/solovyev_d_vector_max/include/header.hpp create mode 100644 tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp create mode 100644 tasks/mpi/solovyev_d_vector_max/src/source.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/func_tests/main.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/include/header.hpp create mode 100644 tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/src/source.cpp diff --git a/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp new file mode 100644 index 00000000000..ba20c58cfc1 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp @@ -0,0 +1,107 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, Test_Max) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + std::cerr << "1 " << world.rank() << std::endl; + if (world.rank() == 0) { + const int count_size_vector = 240; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + std::cerr << "2 " << world.rank() << std::endl; + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + std::cerr << "3 " << world.rank() << std::endl; + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_2) { + boost::mpi::communicator world; + std::vector global_vec; + 
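+  // Single-element buffer that receives the reduced maximum on rank 0.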
std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} diff --git a/tasks/mpi/solovyev_d_vector_max/include/header.hpp b/tasks/mpi/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..0b49b459cad --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { + +int vectorMax(std::vector> v); + +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +class VectorMaxMPIParallel : public ppc::core::Task { + public: + explicit VectorMaxMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data, localData; + int result{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..36f24830de3 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,98 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + + // 
Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} diff --git a/tasks/mpi/solovyev_d_vector_max/src/source.cpp b/tasks/mpi/solovyev_d_vector_max/src/source.cpp new file mode 100644 index 00000000000..76213313933 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/src/source.cpp @@ -0,0 +1,119 @@ +#include +#include +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +using namespace std::chrono_literals; + +int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { + int m = -214748364; + for (std::string::size_type i = 0; i < v.size(); i++) { + if (v[i] > m) { + m = v[i]; + } + } + return m; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::pre_processing() { + internal_order_test(); + + // Determine number of vector elements per process + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + + // Share delta between 
all processes + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Convert input data to vector + int* input_ = reinterpret_cast(taskData->inputs[0]); + data = std::vector(input_, input_ + taskData->inputs_count[0]); + + // Send each of processes their portion of data + for (int process = 1; process < world.size(); process++) { + world.send(process, 0, data.data() + process * delta, delta); + } + } + + // Initialize local vector + localData = std::vector(delta); + if (world.rank() == 0) { + // Getting data directly if we in zero process + localData = std::vector(data.begin(), data.begin() + delta); + } else { + // Otherwise, recieving data + world.recv(0, 0, localData.data(), delta); + } + + // Init result value + result = 0; + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); + } + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::run() { + internal_order_test(); + int localResult; + + // Search for maximum vector element in current process data + localResult = vectorMax(localData); + + // Search for maximum vector element using all processes data + reduce(world, localResult, result, boost::mpi::maximum(), 0); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() { + internal_order_test(); + + // Init data vector + int* input_ = reinterpret_cast(taskData->inputs[0]); + data = std::vector(input_, input_ + taskData->inputs_count[0]); + + // Init result value + result = 0; + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() { + internal_order_test(); + + // Determine maximum value of data vector + result = vectorMax(data); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result; + return true; +} \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp new file mode 100644 index 00000000000..e05edf14e91 --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp @@ -0,0 +1,153 @@ +#include + +#include +#include + +#include "seq/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, Test_Empty) { + // Create data + std::vector in(0, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential 
VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), false); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10) { + const int count = 10; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_100) { + const int count = 20; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_1000) { + const int count = 50; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10000) { + const int count = 70; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_100000) { + const int count = 100; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
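+  // Wire the raw input and output buffers into the shared TaskData.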
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} diff --git a/tasks/seq/solovyev_d_vector_max/include/header.hpp b/tasks/seq/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..712e45ed1ac --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,25 @@ + +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { +int vectorMax(std::vector> v); +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..8323ffa5f34 --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,92 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, test_pipeline_run) { + const int count = 12000000; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, test_task_run) { + const int count = 12000000; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential = std::make_shared<solovyev_d_vector_max_mpi::VectorMaxSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(1024, out[0]);
+}
diff --git a/tasks/seq/solovyev_d_vector_max/src/source.cpp b/tasks/seq/solovyev_d_vector_max/src/source.cpp
new file mode 100644
index 00000000000..6ead459248c
--- /dev/null
+++ b/tasks/seq/solovyev_d_vector_max/src/source.cpp
@@ -0,0 +1,47 @@
+#include
+#include
+
+#include "seq/solovyev_d_vector_max/include/header.hpp"
+
+using namespace std::chrono_literals;
+
+int solovyev_d_vector_max_mpi::vectorMax(std::vector<int, std::allocator<int>> v) {
+  int m = -214748364;
+  for (std::string::size_type i = 0; i < v.size(); i++) {
+    if (v[i] > m) {
+      m = v[i];
+    }
+  }
+  return m;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() {
+  internal_order_test();
+
+  // Init data vector
+  int* input_ = reinterpret_cast<int*>(taskData->inputs[0]);
+  data = std::vector<int>(input_, input_ + taskData->inputs_count[0]);
+  // Init result value
+  result = 0;
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0);
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() {
+  internal_order_test();
+
+  // Determine maximum value of data vector
+  result = vectorMax(data);
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
\ No newline at end of file

From 12130c0cbbf52f5556ef693f00bc4784f07e71f1 Mon Sep 17 00:00:00 2001
From: Grudzin Konstantin <113104424+Konstantin-Grudzin@users.noreply.github.com>
Date: Sat, 2 Nov 2024 04:22:08 +0300
Subject: [PATCH 043/155] Grudzin Konstantin. Task 1. Variant 7. Finding the
 adjacent vector elements closest in value. (#54)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Main idea**: represent the answer as a pair of numbers - the value and the
index itself. Taking the minimum under the pairwise comparison rules then
yields the leftmost index holding the minimal value.

**Sequential implementation**: walk the array and find the minimal element
among the pairs of "absolute difference of adjacent elements" and index.

**MPI implementation**: split the array into several equal parts and give
each process its own part. Each process computes its minimum and passes it
to the main process via a reduce operation. Since the next element is also
inspected, every process receives one extra element. To avoid stepping past
the array bounds and to compute the answer correctly, each process is given
its starting point and the size of its valid data.
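The (value, index) pair trick from this description, sketched with Boost.MPI
under illustrative names (min_adjacent_diff and its overlap/start convention
are assumptions for the sketch, not the patch's exact interface):

#include <boost/mpi.hpp>
#include <boost/serialization/utility.hpp>  // lets Boost.MPI transfer std::pair
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdlib>
#include <utility>
#include <vector>

// Illustrative sketch, not repository code. chunk holds this rank's block
// plus one overlap element from the next block; start is the global index of
// chunk[0], so lexicographic pair comparison resolves ties to the leftmost.
std::pair<int, int> min_adjacent_diff(const boost::mpi::communicator& world,
                                      const std::vector<int>& chunk, int start) {
  std::pair<int, int> local(INT_MAX, INT_MAX);
  for (std::size_t i = 0; i + 1 < chunk.size(); ++i) {
    std::pair<int, int> cand(std::abs(chunk[i + 1] - chunk[i]), start + static_cast<int>(i));
    local = std::min(local, cand);
  }
  // Minimum over (difference, index) pairs lands on rank 0.
  std::pair<int, int> best(INT_MAX, INT_MAX);
  boost::mpi::reduce(world, local, best, boost::mpi::minimum<std::pair<int, int>>(), 0);
  return best;  // (min |difference|, its left index) on rank 0
}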
---
 .../func_tests/main.cpp | 309 ++++++++++++++++++
 .../include/ops_mpi.hpp |  48 +++
 .../perf_tests/main.cpp |  90 +++++
 .../src/ops_mpi.cpp     | 101 ++++++
 .../func_tests/main.cpp | 133 ++++++++
 .../include/ops_seq.hpp |  24 ++
 .../perf_tests/main.cpp |  85 +++++
 .../src/ops_seq.cpp     |  35 ++
 8 files changed, 825 insertions(+)
 create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..880dd20042f
--- /dev/null
+++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp
@@ -0,0 +1,309 @@
+// Copyright 2023 Nesterov Alexander
+#include
+
+#include
+#include
+#include
+#include
+
+#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp"
+
+namespace grudzin_k_nearest_neighbor_elements_mpi {
+
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = -100 + gen() % 201;
+  }
+  return vec;
+}
+
+}  // namespace grudzin_k_nearest_neighbor_elements_mpi
+
+TEST(grudzin_k_nearest_neighbor_elements_mpi, Wrong_Test) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec(1);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_ans(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_ans.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_ans.size());
+
+    // Create Task
+    grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), false);
+  }
+}
+
+TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_10k) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_ans(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if
(world.rank() == 0) { + const int count_size_vector = 10000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_1k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_2k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 2000; + global_vec = 
grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_4k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 4000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 3000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + 
taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 3; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..7591263e048 --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace 
grudzin_k_nearest_neighbor_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::pair res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::pair res; + size_t size; + size_t start; + boost::mpi::communicator world; +}; + +} // namespace grudzin_k_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..342f90c6b7c --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 5000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 50000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + 
testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..496b3ccf9db --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,101 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; ++i) { + std::pair tmp = {abs(input_[i] - input_[i + 1]), i}; + res = std::min(res, tmp); + } + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res.second; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = (taskData->inputs_count[0]) / world.size(); + size = taskData->inputs_count[0]; + if (taskData->inputs_count[0] % world.size() > 0u) delta++; + } + broadcast(world, delta, 0); + broadcast(world, size, 0); + + if (world.rank() == 0) { + // Init vectors + input_ = std::vector(world.size() * delta + 2, 0); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta + 1); + } + } + + local_input_ = std::vector(delta + 1); + start = world.rank() * delta; + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta + 1); + } else { + world.recv(0, 0, 
local_input_.data(), delta + 1); + } + + std::pair local_ans_ = {INT_MAX, -1}; + for (size_t i = 0; i < local_input_.size() - 1 && (i + start) < size - 1; ++i) { + std::pair tmp = {abs(local_input_[i] - local_input_[i + 1]), i + start}; + local_ans_ = std::min(local_ans_, tmp); + } + reduce(world, local_ans_, res, boost::mpi::minimum>(), 0); + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res.second; + } + return true; +} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..dfd38c04fa4 --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,133 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_seq, Wrong_Test) { + std::vector in = {2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Lazy) { + // Create data + std::vector in = {2, 3}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_24) { + // Create data + std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_40) { + // Create data + std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12, + 2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_60) { + // Create data + std::vector in(100, 0); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Negative) { + // Create data + std::vector in = {-1, -3, -5, -4, -2}; + std::vector out(1, 0); + int ans = 2; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..9f91d411f6d --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace grudzin_k_nearest_neighbor_elements_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_{}; + std::pair res{}; +}; + +} // namespace grudzin_k_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..785b6a6321c --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_seq, test_pipeline_run) { + int size = 10000000; 
+ // Create data + std::vector in(size); + std::vector out(1, 0); + int ans = 3; + for (int i = 0; i < size; ++i) { + in[i] = 3 * i; + } + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, test_task_run) { + int size = 10000000; + // Create data + std::vector in(size); + std::vector out(1, 0); + int ans = 2; + for (int i = 0; i < size; ++i) { + in[i] = 2 * i; + } + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..c661925eb7c --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,35 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +#include + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + 
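+// One linear pass over adjacent pairs: a (difference, index) pair compares
+// lexicographically, so the running std::min keeps the smallest absolute
+// difference and, among ties, the leftmost index.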
+bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_.size() - 1; i++) {
+    res = std::min(res, {abs(input_[i] - input_[i + 1]), static_cast<int>(i)});
+  }
+  return true;
+}
+
+bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = res.second;
+  return true;
+}

From 5e2140f16dc8469f9af46c505e9ae5892101245a Mon Sep 17 00:00:00 2001
From: Cufeed <132374822+Cufeed@users.noreply.github.com>
Date: Sat, 2 Nov 2024 04:24:04 +0300
Subject: [PATCH 044/155] Gusev Nikita. Task 1. Variant 20. Integration via
 the trapezoidal rule. (#60)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task:
Parameter extraction: the lower and upper integration bounds and the number
of trapezoids are read from the input data.
Data validation: the input data is checked to make sure it matches the
expected requirements.
Integral computation: the algorithm splits the integration interval into
equal segments (trapezoids), computes the area contribution of each one,
and accumulates the results.

Description of the parallel task (MPI):
In the parallel case each process handles its own set of trapezoids. This is
done with a loop in which each process computes the indices matching its
rank and the communicator size (the number of processes). For example, the
process with rank `rank` handles the trapezoids whose indices start at
`rank` and advance by the number of processes (`size`), which spreads the
load evenly.
Local computation: each process computes the local sum of function values
over its own trapezoids. The first process (rank 0) additionally adds the
values at the endpoints (a and b) to its local sum.
Result collection: once all processes have finished their computations, the
local results are gathered on the root process with a reduce operation.
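For reference, with step h = (b - a) / n this is the composite trapezoidal
rule

    integral from a to b of f(x) dx
        ~= h * (f(a)/2 + f(a + h) + f(a + 2h) + ... + f(a + (n-1)h) + f(b)/2),

which is exactly the sum that the sequential integrate() accumulates before
the final scaling by h. As a quick sanity check (our numbers, not from the
patch): f(x) = x^2 on [0, 1] with n = 2 gives h = 0.5 and
0.5 * (0/2 + 0.25 + 1/2) = 0.375, already close to the exact 1/3.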
---
 .../func_tests/main.cpp                      | 372 ++++++++++++++++++
 .../include/ops_mpi.hpp                      |  62 +++
 .../perf_tests/main.cpp                      |  91 +++++
 .../gusev_n_trapezoidal_rule/src/ops_mpi.cpp | 123 ++++++
 .../func_tests/main.cpp                      | 147 +++++++
 .../include/ops_seq.hpp                      |  34 ++
 .../perf_tests/main.cpp                      |  92 +++++
 .../gusev_n_trapezoidal_rule/src/ops_seq.cpp |  56 +++
 8 files changed, 977 insertions(+)
 create mode 100644 tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp
 create mode 100644 tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp
 create mode 100644 tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp
 create mode 100644 tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp
 create mode 100644 tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp
 create mode 100644 tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp
 create mode 100644 tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp

diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp
new file mode 100644
index 00000000000..a7c1de0ed2f
--- /dev/null
+++ b/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp
@@ -0,0 +1,372 @@
+#define _USE_MATH_DEFINES
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <cmath>
+#include <random>
+#include <vector>
+
+#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp"
+
+TEST(gusev_n_trapezoidal_rule_mpi, ConstantFunctionTest) {
+  boost::mpi::communicator world;
+  std::vector<double> result_global(1, 0);
+
+  auto taskDataParallel = std::make_shared<ppc::core::TaskData>();
+
+  double lower_bound = 0.0;
+  double upper_bound = 10.0;
+  int intervals = 1000000;
+
+  if (world.rank() == 0) {
+    taskDataParallel->inputs.emplace_back(reinterpret_cast<uint8_t *>(&lower_bound));
+    taskDataParallel->inputs_count.emplace_back(1);
+    taskDataParallel->inputs.emplace_back(reinterpret_cast<uint8_t *>(&upper_bound));
+    taskDataParallel->inputs_count.emplace_back(1);
+    taskDataParallel->inputs.emplace_back(reinterpret_cast<uint8_t *>(&intervals));
+    taskDataParallel->inputs_count.emplace_back(1);
+    taskDataParallel->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_global.data()));
+    taskDataParallel->outputs_count.emplace_back(result_global.size());
+  }
+
+  gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel);
+  parallelTask.set_function([](double x) { return 5.0; });
+  ASSERT_EQ(parallelTask.validation(), true);
+  parallelTask.pre_processing();
+  parallelTask.run();
+  parallelTask.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<double> reference_result(1, 0);
+
+    auto taskDataSequential = std::make_shared<ppc::core::TaskData>();
+    taskDataSequential->inputs.emplace_back(reinterpret_cast<uint8_t *>(&lower_bound));
+    taskDataSequential->inputs_count.emplace_back(1);
+    taskDataSequential->inputs.emplace_back(reinterpret_cast<uint8_t *>(&upper_bound));
+    taskDataSequential->inputs_count.emplace_back(1);
+    taskDataSequential->inputs.emplace_back(reinterpret_cast<uint8_t *>(&intervals));
+    taskDataSequential->inputs_count.emplace_back(1);
+    taskDataSequential->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_result.data()));
+    taskDataSequential->outputs_count.emplace_back(reference_result.size());
+
+    gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential);
+    sequentialTask.set_function([](double x) { return 5.0; });
+    ASSERT_EQ(sequentialTask.validation(), true);
+    sequentialTask.pre_processing();
+
sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, SquareFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 5.0; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, SineFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = M_PI; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + 
taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, ExponentialFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 1.0; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RemainderCaseTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 5.0; + int intervals = 1000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + 
taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RandomizedConstantFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0, 10.0); + int intervals = 1000000; + + double lower_bound = dis(gen); + double upper_bound = dis(gen); + if (lower_bound > upper_bound) std::swap(lower_bound, upper_bound); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + 
taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RandomizedSineFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0, M_PI); + int intervals = 1000000; + + double lower_bound = dis(gen); + double upper_bound = dis(gen); + if (lower_bound > upper_bound) std::swap(lower_bound, upper_bound); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} \ No newline at end of file diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp b/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp new file mode 100644 index 00000000000..78a8459ba67 --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gusev_n_trapezoidal_rule_mpi { + +class TrapezoidalIntegrationSequential : public ppc::core::Task { + public: + explicit TrapezoidalIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + 
bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +class TrapezoidalIntegrationParallel : public ppc::core::Task { + public: + explicit TrapezoidalIntegrationParallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + double parallel_integrate(const std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double global_result_{}; + std::function func_; + + boost::mpi::communicator world; +}; + +} // namespace gusev_n_trapezoidal_rule_mpi \ No newline at end of file diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp new file mode 100644 index 00000000000..1168c39d2bb --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp" + +TEST(gusev_n_trapezoidal_rule_mpi, test_pipeline_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 100000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, test_task_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 100000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + 
auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp new file mode 100644 index 00000000000..da4cd539a51 --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp @@ -0,0 +1,123 @@ +#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::run() { + internal_order_test(); + result_ = integrate(func_, a_, b_, n_); + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +double gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double h = (b - a) / n; + double sum = 0.5 * (f(a) + f(b)); + + for (int i = 1; i < n; ++i) { + double x = a + i * h; + sum += f(x); + } + + return sum * h; +} + +void gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + } + + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::run() { + internal_order_test(); + MPI_Bcast(&a_, sizeof(a_) + sizeof(b_) + sizeof(n_), MPI_BYTE, 0, world); + double local_result = parallel_integrate(func_, a_, b_, n_); + reduce(world, local_result, global_result_, std::plus<>(), 0); + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = global_result_; + } + return true; +} + +double gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::parallel_integrate( + const std::function& f, double a, double b, int n) { + int rank = world.rank(); + int size = world.size(); + + 
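+  // Cyclic (round-robin) split: rank r evaluates f at x = a + i*h for
+  // i = r, r + size, r + 2*size, ..., and rank 0 additionally adds the
+  // halved endpoint terms 0.5 * (f(a) + f(b)); reduce(..., std::plus<>())
+  // in run() then sums the per-rank contributions on rank 0.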
double h = (b - a) / n; + double local_sum = 0.0; + + for (int i = rank; i < n; i += size) { + double x = a + i * h; + local_sum += f(x); + } + + if (rank == 0) { + local_sum += 0.5 * (f(a) + f(b)); + } + + return local_sum * h; +} + +void gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::set_function( + const std::function& func) { + func_ = func; +} \ No newline at end of file diff --git a/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp b/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp new file mode 100644 index 00000000000..f6628b0e1e6 --- /dev/null +++ b/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp @@ -0,0 +1,147 @@ +// Copyright 2023 Nesterov Alexander +#define _USE_MATH_DEFINES +#include + +#include +#include + +#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp" + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_x_squared) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + ASSERT_TRUE(testTaskSequential->validation()); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = 0.5; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x; }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_sin_x) { + const double a = 0.0; + const double b = M_PI; + const int n = 1000; + + const double expected_result = 2.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::sin(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + 
+  testTaskSequential.post_processing();
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
+
+TEST(gusev_n_trapezoidal_rule_seq, test_integration_exp_x) {
+  const double a = 0.0;
+  const double b = 1.0;
+  const int n = 1000;
+
+  const double expected_result = std::exp(1.0) - 1.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return std::exp(x); };
+  testTaskSequential.set_function(func);
+
+  ASSERT_TRUE(testTaskSequential.validation());
+
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
+
+TEST(gusev_n_trapezoidal_rule_seq, test_set_function) {
+  std::vector<double> in = {0.0, 1.0, 1000};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential.set_function(func);
+
+  double x = 2.0;
+  double expected_result = 4.0;
+  ASSERT_EQ(func(x), expected_result);
+}
diff --git a/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp b/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp
new file mode 100644
index 00000000000..978d61fa031
--- /dev/null
+++ b/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp
@@ -0,0 +1,34 @@
+// Copyright 2024 Nesterov Alexander
+#pragma once
+
+#include
+#include
+#include
+
+#include "core/task/include/task.hpp"
+
+namespace gusev_n_trapezoidal_rule_seq {
+
+class TrapezoidalIntegrationSequential : public ppc::core::Task {
+ public:
+  explicit TrapezoidalIntegrationSequential(std::shared_ptr<ppc::core::TaskData> taskData_)
+      : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+  void set_function(const std::function<double(double)>& func);
+
+ private:
+  static double integrate(const std::function<double(double)>& f, double a, double b, int n);
+
+  double a_{};
+  double b_{};
+  int n_{};
+  double result_{};
+  std::function<double(double)> func_;
+};
+
+}  // namespace gusev_n_trapezoidal_rule_seq
\ No newline at end of file
diff --git a/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp b/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp
new file mode 100644
index 00000000000..f8aa8d4adb7
--- /dev/null
+++ b/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp
@@ -0,0 +1,92 @@
+// Copyright 2024 Nesterov Alexander
+#define _USE_MATH_DEFINES
+#include
+
+#include
+#include
+
+#include "core/perf/include/perf.hpp"
+#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp"
+
+TEST(gusev_n_trapezoidal_rule_seq, test_pipeline_run) {
+  const double a = 0.0;
+  const double b = 1.0;
+  const int n = 10000000;
+
+  const double expected_result = 1.0 / 3.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
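+  // TaskData carries type-erased uint8_t* buffers plus element counts; the
+  // task casts them back to double in pre_processing()/post_processing().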
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential =
+      std::make_shared<gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential>(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential->set_function(func);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
+
+TEST(gusev_n_trapezoidal_rule_seq, test_task_run) {
+  const double a = 0.0;
+  const double b = 1.0;
+  const int n = 10000000;
+  const double expected_result = 1.0 / 3.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential =
+      std::make_shared<gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential>(taskDataSeq);
+
+  std::function<double(double)> func = [](double x) { return x * x; };
+  testTaskSequential->set_function(func);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+
+  perfAnalyzer->task_run(perfAttr, perfResults);
+
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_NEAR(out[0], expected_result, 1e-3);
+}
diff --git a/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp b/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp
new file mode 100644
index 00000000000..251891b2fe3
--- /dev/null
+++ b/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp
@@ -0,0 +1,56 @@
+#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp"
+
+#include
+#include
+
+bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::pre_processing() {
+  internal_order_test();
+
+  auto* inputs = reinterpret_cast<double*>(taskData->inputs[0]);
+
+  a_ = inputs[0];
+  b_ = inputs[1];
+  n_ = static_cast<int>(inputs[2]);
+
+  result_ = 0.0;
+  return true;
+}
+
+bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1;
+}
+
+bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::run() {
+  internal_order_test();
+
+  result_ = integrate(func_, a_, b_, n_);
+
+  return true;
+}
+
+bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::post_processing() {
+  internal_order_test();
+
+  reinterpret_cast<double*>(taskData->outputs[0])[0] = result_;
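+  // The test's out[0] aliases this output buffer, so the result reaches the
+  // caller without an extra copy.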
+ return true; +} + +double gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double step = (b - a) / n; + double area = 0.0; + + for (int i = 0; i < n; ++i) { + double x0 = a + i * step; + double x1 = a + (i + 1) * step; + area += (f(x0) + f(x1)) * step / 2.0; + } + + return area; +} + +void gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} \ No newline at end of file From b374c725f3cd3bfc7d20929289e622b614c3bb98 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Sat, 2 Nov 2024 02:51:23 +0100 Subject: [PATCH 045/155] Perf check CI (#121) --- scripts/generate_perf_results.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/generate_perf_results.sh b/scripts/generate_perf_results.sh index e746fe66714..6e590dc6e76 100644 --- a/scripts/generate_perf_results.sh +++ b/scripts/generate_perf_results.sh @@ -1,3 +1,3 @@ mkdir build/perf_stat_dir -source scripts/run_perf_collector.sh &> build/perf_stat_dir/perf_log.txt +source scripts/run_perf_collector.sh 2>&1 | tee build/perf_stat_dir/perf_log.txt python3 scripts/create_perf_table.py --input build/perf_stat_dir/perf_log.txt --output build/perf_stat_dir From 2c7650920e9a6fd8e1bf6b55258b734f9794c27c Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sat, 2 Nov 2024 09:52:42 +0800 Subject: [PATCH 046/155] =?UTF-8?q?Revert=20"=D0=9A=D1=83=D1=80=D0=B0?= =?UTF-8?q?=D0=BA=D0=B8=D0=BD=20=D0=9C=D0=B0=D1=82=D0=B2=D0=B5=D0=B9.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2017.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B8=D0=BD=D0=B8=D0=BC?= =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87?= =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D1=80?= =?UTF-8?q?=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86?= =?UTF-8?q?=D1=8B."=20(#119)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#15 https://github.com/learning-process/ppc-2024-autumn/pull/15#discussion_r1826402223 --- .../func_tests/main.cpp | 363 ------------------ .../include/ops_mpi.hpp | 50 --- .../perf_tests/main.cpp | 104 ----- .../src/ops_mpi.cpp | 150 -------- .../func_tests/main.cpp | 131 ------- .../include/ops_seq.hpp | 26 -- .../perf_tests/main.cpp | 100 ----- .../src/ops_seq.cpp | 46 --- 8 files changed, 970 deletions(-) delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp deleted file mode 100644 index ec80d2b3548..00000000000 --- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp 
+++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_5) { - int count_rows = 3; - int size_rows = 5; - boost::mpi::communicator world; - std::vector global_mat; - std::vector ans; - std::vector par_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0}; - ans = {1, 3, 0}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, ans); - ASSERT_EQ(par_min_vec, ans); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_6) { - int count_rows = 3; - int size_rows = 6; - boost::mpi::communicator world; - std::vector global_mat; - std::vector ans; - std::vector par_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; - ans = {3, 4, 2}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - 
testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, ans); - ASSERT_EQ(par_min_vec, ans); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_4_5) { - int count_rows = 4; - int size_rows = 5; - boost::mpi::communicator world; - std::vector global_mat; - std::vector ans; - std::vector par_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; - ans = {3, 4, 2, 5}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, ans); - ASSERT_EQ(par_min_vec, ans); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_12) { - int count_rows = 10; - int size_rows = 12; - 
boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, par_min_vec); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_15) { - int count_rows = 10; - int size_rows = 15; - boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - 
taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, par_min_vec); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_2) { - int count_rows = 10; - int size_rows = 2; - boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector ref_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(ref_min_vec, par_min_vec); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_0_0) { - int count_rows = 0; - int size_rows = 0; - boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - 
taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - - kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_FALSE(testMpiTaskParallel.validation()); - } -} diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp deleted file mode 100644 index 68621f67642..00000000000 --- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace kurakin_m_min_values_by_rows_matrix_mpi { - -std::vector getRandomVector(int sz); - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int count_rows{}; - int size_rows{}; - std::vector input_; - std::vector res; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int count_rows{}; - int size_rows{}; - std::vector input_, local_input_; - std::vector res; - boost::mpi::communicator world; -}; - -} // namespace kurakin_m_min_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp deleted file mode 100644 index a290fa90e1a..00000000000 --- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" - -TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_pipeline_run) { - int count_rows = 100; - int size_rows = 400; - boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = std::vector(count_rows * size_rows, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - 
ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < par_min_vec.size(); i++) { - EXPECT_EQ(1, par_min_vec[0]); - } - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_task_run) { - int count_rows = 100; - int size_rows = 400; - boost::mpi::communicator world; - std::vector global_mat; - std::vector par_min_vec(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_mat = std::vector(count_rows * size_rows, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->inputs_count.emplace_back(global_mat.size()); - taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataPar->inputs_count.emplace_back(static_cast(1)); - taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); - taskDataPar->outputs_count.emplace_back(par_min_vec.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < par_min_vec.size(); i++) { - EXPECT_EQ(1, par_min_vec[0]); - } - } -} diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp deleted file mode 100644 index db05f44e74a..00000000000 --- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -std::vector kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - - count_rows = (int)*taskData->inputs[1]; - size_rows = (int)*taskData->inputs[2]; - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - 
for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - res = std::vector(count_rows, 0); - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - - return *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 && *taskData->inputs[1] == taskData->outputs_count[0]; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - - for (int i = 0; i < count_rows; i++) { - res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - - for (int i = 0; i < count_rows; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - count_rows = 0; - size_rows = 0; - - unsigned int delta = 0; - - if (world.rank() == 0) { - count_rows = (int)*taskData->inputs[1]; - size_rows = (int)*taskData->inputs[2]; - if (taskData->inputs_count[0] % world.size() == 0) { - delta = taskData->inputs_count[0] / world.size(); - } else { - delta = taskData->inputs_count[0] / world.size() + 1; - } - } - - broadcast(world, count_rows, 0); - broadcast(world, size_rows, 0); - broadcast(world, delta, 0); - - if (world.rank() == 0) { - input_ = std::vector(delta * world.size(), INT_MAX); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - } - - local_input_ = std::vector(delta); - boost::mpi::scatter(world, input_.data(), local_input_.data(), delta, 0); - - res = std::vector(count_rows, INT_MAX); - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - return *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 && *taskData->inputs[1] == taskData->outputs_count[0]; - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - unsigned int last_delta = 0; - if (world.rank() == world.size() - 1) { - last_delta = local_input_.size() * world.size() - size_rows * count_rows; - } - - unsigned int ind = world.rank() * local_input_.size() / size_rows; - for (unsigned int i = 0; i < ind; ++i) { - reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); - } - - unsigned int delta = std::min(local_input_.size(), size_rows - world.rank() * local_input_.size() % size_rows); - int local_res; - - local_res = *std::min_element(local_input_.begin(), local_input_.begin() + delta); - reduce(world, local_res, res[ind], boost::mpi::minimum(), 0); - ++ind; - - unsigned int k = 0; - while (local_input_.begin() + delta + k * size_rows < local_input_.end() - last_delta) { - local_res = *std::min_element(local_input_.begin() + delta + k * size_rows, - std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * size_rows)); - reduce(world, local_res, res[ind], boost::mpi::minimum(), 0); - ++k; - ++ind; - } - - for (unsigned int i = ind; i < res.size(); ++i) { - reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); - } - - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - - if (world.rank() == 0) { - for (int i = 0; i < 
count_rows; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - } - return true; -} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp deleted file mode 100644 index 480eccc6ab7..00000000000 --- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" - -TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min1) { - int count_rows; - int size_rows; - - // Create data - count_rows = 3; - size_rows = 5; - std::vector global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0}; - - std::vector seq_min_vec(count_rows, 0); - std::vector ans = {1, 3, 0}; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, seq_min_vec); -} - -TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min2) { - int count_rows; - int size_rows; - - // Create data - count_rows = 3; - size_rows = 6; - std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; - - std::vector seq_min_vec(count_rows, 0); - std::vector ans = {3, 4, 2}; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, seq_min_vec); -} - -TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min3) { - int count_rows; - int size_rows; - - // Create data - count_rows = 4; - size_rows = 5; - - std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; - - std::vector seq_min_vec(count_rows, 0); - std::vector ans = {3, 4, 2, 5}; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, seq_min_vec); -} - -TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min_null) { - int count_rows; - int size_rows; - // Create data - count_rows = 0; - size_rows = 0; - std::vector global_mat(count_rows * size_rows); - std::vector seq_min_vec(count_rows, 0); - std::vector ans(count_rows, 0); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - // Create Task - kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(seq_min_vec, ans); -} \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp deleted file mode 100644 index 6c4d04360c4..00000000000 --- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace kurakin_m_min_values_by_rows_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int count_rows{}; - int size_rows{}; - std::vector input_; - std::vector res; -}; - -} // namespace kurakin_m_min_values_by_rows_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp deleted file mode 100644 index b07bae29d92..00000000000 --- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" - -TEST(kurakin_m_min_values_by_rows_matrix_seq, test_pipeline_run) { - int count_rows; - int size_rows; - - // Create data - count_rows = 100; - size_rows = 400; - std::vector global_mat(count_rows * size_rows, 1); - std::vector 
seq_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (size_t i = 0; i < seq_min_vec.size(); i++) { - EXPECT_EQ(1, seq_min_vec[0]); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_seq, test_task_run) { - int count_rows; - int size_rows; - - // Create data - count_rows = 100; - size_rows = 400; - std::vector global_mat(count_rows * size_rows, 1); - std::vector seq_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < seq_min_vec.size(); i++) { - EXPECT_EQ(1, seq_min_vec[0]); - } -} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp deleted file mode 100644 index 9219594818e..00000000000 --- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" - -#include 
-#include -#include -#include - -using namespace std::chrono_literals; - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - // Init value for output - count_rows = (int)*taskData->inputs[1]; - size_rows = (int)*taskData->inputs[2]; - res = std::vector(count_rows, 0); - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return *taskData->inputs[1] == taskData->outputs_count[0]; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::run() { - internal_order_test(); - for (int i = 0; i < count_rows; i++) { - res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { - internal_order_test(); - for (int i = 0; i < count_rows; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - return true; -} From afaedb593d2ee1d9ed6ac5cb3cb595d36c1ef04b Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sat, 2 Nov 2024 10:02:17 +0800 Subject: [PATCH 047/155] =?UTF-8?q?Revert=20"=D0=9A=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D0=B4=D0=BA=D0=B8=D0=BD=20=D0=93=D1=80=D0=B8=D0=B3=D0=BE=D1=80?= =?UTF-8?q?=D0=B8=D0=B9.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2025.=20=D0=9F=D0=BE=D0=B4?= =?UTF-8?q?=D1=81=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20?= =?UTF-8?q?=D0=BF=D1=80=D0=B5=D0=B4=D0=BB=D0=BE=D0=B6=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B9=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5."=20(#12?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#19 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11638887120/job/32414315037 image --- .../func_tests/main.cpp | 306 ------------------ .../include/ops_mpi.hpp | 46 --- .../perf_tests/main.cpp | 95 ------ .../kolodkin_g_sentence_count/src/ops_mpi.cpp | 101 ------ .../func_tests/main.cpp | 115 ------- .../include/ops_seq.hpp | 24 -- .../perf_tests/main.cpp | 79 ----- .../kolodkin_g_sentence_count/src/ops_seq.cpp | 35 -- 8 files changed, 801 deletions(-) delete mode 100644 tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp delete mode 100644 tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp delete mode 100644 tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp delete mode 100644 tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp delete mode 100644 tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp delete mode 100644 tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp delete mode 100644 tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp delete mode 100644 tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp diff --git a/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp b/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp deleted file mode 100644 index c226110bfa7..00000000000 --- a/tasks/mpi/kolodkin_g_sentence_count/func_tests/main.cpp +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include 
"mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" - -TEST(Parallel_Operations_MPI, Test_empty_string) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 0); - } -} - -TEST(Parallel_Operations_MPI, Test_two_sentences) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - std::string str = "Hello!My name is Grisha!"; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 2); - } -} - -TEST(Parallel_Operations_MPI, Test_sentences_with_special_symbols) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector 
global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - std::string str = "Hello! My name is Grisha! I have cat,dog,parrot."; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 3); - } -} - -TEST(Parallel_Operations_MPI, Test_sentences_with_special_symbols_in_end_of_sentence) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - std::string str = "Hello! My name is Grisha! I have cat, dog, parrot. What is your name? How are you? 
Well..."; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 6); - } -} -TEST(Parallel_Operations_MPI, Test_sentences_with_double_symbols) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - std::string str = - "Hello!! My name is Grisha!! I have two pets: cat,dog,parrot. What is your name?! How are you!? 
Well..."; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 6); - } -} -TEST(Parallel_Operations_MPI, Big_text) { - boost::mpi::communicator world; - std::vector global_str; - - // Create data - std::vector global_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataMpi = std::make_shared(); - if (world.rank() == 0) { - std::string str = - "Otche nash, ize esi na nebeseh! Da svytitsa imya tvoe, da priidet tsarstvo tvoe! Da budet volya tvoya, ako na " - "nebeseh i na zemle. Hleb nas nasyshnii dazd nam dnes, i ostavi nam dolgi nasha. Yakozhe i my ostavlyaem " - "dolznikom nashim! I ne vvedi nas vo iskushenie, no izbavi nas ot lukavogo... 
Amin!"; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataMpi->inputs_count.emplace_back(global_str.size()); - taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); - taskDataMpi->outputs_count.emplace_back(global_out.size()); - } - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataSeq->inputs_count.emplace_back(global_str.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); - taskDataSeq->outputs_count.emplace_back(reference_out.size()); - - // Create Task - kolodkin_g_sentence_count_mpi::TestMPITaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(reference_out[0], global_out[0]); - ASSERT_EQ(reference_out[0], 7); - } -} \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp b/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp deleted file mode 100644 index b4599adbf35..00000000000 --- a/tasks/mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace kolodkin_g_sentence_count_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res{}; - int localSentenceCount{}; - boost::mpi::communicator world; -}; - -} // namespace kolodkin_g_sentence_count_mpi \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp b/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp deleted file mode 100644 index b37ea023149..00000000000 --- a/tasks/mpi/kolodkin_g_sentence_count/perf_tests/main.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" - -TEST(mpi_kolodkin_g_sentence_count_test, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_str; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::string str = - 
"verifwriefnifnil!?vfnjklererjerjkerg...vrhklererffwjklfwefwejo!vefnklvevef?wfnkrkflwewefkl!vfnklvfklevf?" - "vrrnervevrnvreiev!"; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataPar->inputs_count.emplace_back(global_str.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(7, global_sum[0]); - } -} - -TEST(mpi_kolodkin_g_sentence_count_test, test_task_run) { - boost::mpi::communicator world; - std::vector global_str; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::string str = - "Na krayu dorogi stoyal dub! Eto byl ogromnuy, v dva obhvata dub. Knyaz Andrey podosel k dubu! Boze prabiy! " - "Kak " - "tebya zovut? Ya dub! A ya knyaz Andrey! Zdorovo! Poka-poka, dub! Poka, Andrey!"; - for (unsigned long int i = 0; i < str.length(); i++) { - global_str.push_back(str[i]); - } - taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); - taskDataPar->inputs_count.emplace_back(global_str.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(10, global_sum[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp b/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp deleted file mode 100644 index 3bebc56e8a6..00000000000 --- a/tasks/mpi/kolodkin_g_sentence_count/src/ops_mpi.cpp +++ /dev/null @@ -1,101 +0,0 @@ -#include "mpi/kolodkin_g_sentence_count/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = 
reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - res = 0; - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return taskData->outputs_count[0] == 1; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::run() { - internal_order_test(); - for (unsigned long i = 0; i < input_.size(); i++) { - if ((input_[i] == '.' || input_[i] == '!' || input_[i] == '?') && - ((input_[i + 1] != '.' && input_[i + 1] != '!' && input_[i + 1] != '?') || i + 1 == input_.size())) { - res++; - } - } - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - unsigned int delta = 0; - if (world.rank() == 0) { - delta = taskData->inputs_count[0] / world.size(); - } - broadcast(world, delta, 0); - - if (world.rank() == 0) { - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - } - local_input_.resize(delta); - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * delta, delta); - } - local_input_ = std::vector(input_.begin(), input_.begin() + delta); - } else { - world.recv(0, 0, local_input_.data(), delta); - } - localSentenceCount = 0; - res = 0; - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - return taskData->outputs_count[0] == 1; - } - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::run() { - internal_order_test(); - for (unsigned long i = 0; i < local_input_.size(); i++) { - if ((local_input_[i] == '.' || local_input_[i] == '!' || local_input_[i] == '?') && - ((local_input_[i + 1] != '.' && local_input_[i + 1] != '!' && local_input_[i + 1] != '?') || - i + 1 == local_input_.size())) { - localSentenceCount++; - } - } - reduce(world, localSentenceCount, res, std::plus<>(), 0); - return true; -} - -bool kolodkin_g_sentence_count_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res; - } - return true; -} diff --git a/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp b/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp deleted file mode 100644 index 37a6bd9f7a8..00000000000 --- a/tasks/seq/kolodkin_g_sentence_count/func_tests/main.cpp +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp" - -TEST(Sequential, Test_two_sentences) { - // Create data - std::string str = "Hello! 
My name is Grisha!"; - std::vector in(1, str); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(out[0], 2); -} -TEST(Sequential, Test_sentences_with_special_symbols) { - // Create data - std::string str = "Hello!My name is Grisha! I have two pets: cat,dog,parrot."; - std::vector out(1, 0); - std::vector in(1, str); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(out[0], 3); -} -TEST(Sequential, Test_sentences_with_special_symbols_in_end_of_sentence) { - // Create data - std::string str = - "Hello!My name is Grisha! I have two pets: cat,dog,parrot. What is your name?! How are you!? Well..."; - std::vector in(1, str); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(out[0], 6); -} -TEST(Sequential, Test_sentences_with_double_symbols) { - // Create data - std::string str = - "Hello!! My name is Grisha!! I have two pets: cat,dog,parrot. What is your name?! How are you!? Well..."; - std::vector out(1, 0); - std::vector in(1, str); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(out[0], 6); -} -TEST(Sequential, Big_text) { - // Create data - std::string str = - "Otche nash, ize esi na nebeseh! Da svytitsa imya tvoe, da priidet tsarstvo tvoe! Da budet volya tvoya, ako na " - "nebeseh i na zemle. Hleb nas nasyshnii dazd nam dnes, i ostavi nam dolgi nasha. Yakozhe i my ostavlyaem " - "dolznikom nashim! 
I ne vvedi nas vo iskushenie, no izbavi nas ot lukavogo... Amin!"; - std::vector out(1, 0); - std::vector in(1, str); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - kolodkin_g_sentence_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(out[0], 7); -} \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp b/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp deleted file mode 100644 index 05b6347b5b1..00000000000 --- a/tasks/seq/kolodkin_g_sentence_count/include/ops_seq.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace kolodkin_g_sentence_count_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::string input_{}; - int res{}; -}; - -} // namespace kolodkin_g_sentence_count_seq \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp b/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp deleted file mode 100644 index 379de76bf08..00000000000 --- a/tasks/seq/kolodkin_g_sentence_count/perf_tests/main.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp" - -TEST(seq_kolodkin_g_sentence_count_test, test_pipeline_run) { - // Create data - std::string str = - "verifwriefnifnil!?vfnjklererjerjkerg...vrhklererffwjklfwefwejo!vefnklvevef?wfnkrkflwewefkl!vfnklvfklevf?" - "vrrnervevrnvreiev!"; - std::vector out(1, 0); - std::vector in(1, str); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(7, out[0]); -} - -TEST(seq_kolodkin_g_sentence_count_test, test_task_run) { - // Create data - std::string str = "Hello! My name is Grisha! Good morning! 
How are you!"; - std::vector out(1, 0); - std::vector in(1, str); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(4, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp b/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp deleted file mode 100644 index 55d1276fc76..00000000000 --- a/tasks/seq/kolodkin_g_sentence_count/src/ops_seq.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/kolodkin_g_sentence_count/include/ops_seq.hpp" - -#include - -using namespace std::chrono_literals; - -bool kolodkin_g_sentence_count_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - input_ = *reinterpret_cast(taskData->inputs[0]); - res = 0; - return true; -} - -bool kolodkin_g_sentence_count_seq::TestTaskSequential::validation() { - internal_order_test(); - return taskData->outputs_count[0] == 1; -} - -bool kolodkin_g_sentence_count_seq::TestTaskSequential::run() { - internal_order_test(); - for (unsigned long i = 0; i < input_.length(); i++) { - if ((input_[i] == '.' || input_[i] == '!' || input_[i] == '?') && - ((input_[i + 1] != '.' && input_[i + 1] != '!' 
&& input_[i + 1] != '?') || i + 1 == input_.length())) { - res++; - } - } - return true; -} - -bool kolodkin_g_sentence_count_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} From d00cb5a858dea0060ac2e8714f4612eb532e48c8 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sat, 2 Nov 2024 10:04:26 +0800 Subject: [PATCH 048/155] [CI] Add testing on different proc count (#117) --- .github/workflows/main.yml | 29 +++++++++++++++++++++++++++-- scripts/run.sh | 8 ++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b4889117e87..9d26069a9fd 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,9 +45,20 @@ jobs: env: CC: gcc-13 CXX: g++-13 - - name: Run func tests + - name: Run func tests (num_proc=2) run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=2 + source scripts/run.sh + - name: Run func tests (num_proc=3) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=3 + source scripts/run.sh + - name: Run func tests (num_proc=4) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh ubuntu-clang-build: runs-on: ubuntu-latest @@ -86,9 +97,20 @@ jobs: env: CC: clang-18 CXX: clang++-18 - - name: Run tests + - name: Run func tests (num_proc=2) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=2 + source scripts/run.sh + - name: Run func tests (num_proc=3) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=3 + source scripts/run.sh + - name: Run func tests (num_proc=4) run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh ubuntu-clang-sanitizer-build: runs-on: ubuntu-latest @@ -131,6 +153,7 @@ jobs: run: | export OMP_NUM_THREADS=4 export ASAN_RUN=1 + export PROC_COUNT=4 source scripts/run.sh macos-clang-build: runs-on: macOS-latest @@ -164,6 +187,7 @@ jobs: - name: Run tests run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=2 source scripts/run.sh windows-msvc-build: runs-on: windows-latest @@ -265,6 +289,7 @@ jobs: - name: Run tests run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh - name: Generate gcovr Coverage Data run: | diff --git a/scripts/run.sh b/scripts/run.sh index 048adc68708..8e751e5c8fa 100644 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -16,11 +16,11 @@ fi if [[ -z "$ASAN_RUN" ]]; then if [[ $OSTYPE == "linux-gnu" ]]; then - mpirun --oversubscribe -np 4 ./build/bin/sample_mpi - mpirun --oversubscribe -np 4 ./build/bin/sample_mpi_boost + mpirun --oversubscribe -np $PROC_COUNT ./build/bin/sample_mpi + mpirun --oversubscribe -np $PROC_COUNT ./build/bin/sample_mpi_boost elif [[ $OSTYPE == "darwin"* ]]; then - mpirun -np 2 ./build/bin/sample_mpi - mpirun -np 2 ./build/bin/sample_mpi_boost + mpirun -np $PROC_COUNT ./build/bin/sample_mpi + mpirun -np $PROC_COUNT ./build/bin/sample_mpi_boost fi fi ./build/bin/sample_omp From 18b794c6d782f1e696a55773c307064b7a28990b Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sat, 2 Nov 2024 10:10:40 +0800 Subject: [PATCH 049/155] =?UTF-8?q?Revert=20"=D0=9B=D0=BE=D0=BF=D0=B0?= =?UTF-8?q?=D1=82=D0=B8=D0=BD=20=D0=98=D0=BB=D1=8C=D1=8F.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2024.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D1=91=D1=82?= =?UTF-8?q?=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=81=D0=BB=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.=20"=20(#123)?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#35 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11638948183/job/32414459755 --- .../func_tests/countWordsFuncTests.cpp | 181 ------------------ .../include/countWordsMPIHeader.hpp | 47 ----- .../perf_tests/countWordsPerfTests.cpp | 72 ------- .../src/countWordsMPI.cpp | 100 ---------- .../func_tests/countWordsFuncTests.cpp | 97 ---------- .../include/countWordsSeqHeader.hpp | 27 --- .../perf_tests/countWordsPerfTests.cpp | 66 ------- .../src/countWordsSeq.cpp | 52 ----- 8 files changed, 642 deletions(-) delete mode 100644 tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp delete mode 100644 tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp delete mode 100644 tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp delete mode 100644 tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp delete mode 100644 tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp delete mode 100644 tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp delete mode 100644 tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp delete mode 100644 tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp diff --git a/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp deleted file mode 100644 index 970de60179c..00000000000 --- a/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp +++ /dev/null @@ -1,181 +0,0 @@ -#include - -#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" - -TEST(lopatin_i_count_words_mpi, test_empty_string) { - boost::mpi::communicator world; - std::vector input = {}; - std::vector wordCount(1, 0); - - std::shared_ptr taskDataParallel = std::make_shared(); - - if (world.rank() == 0) { - taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataParallel->inputs_count.emplace_back(input.size()); - taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskDataParallel->outputs_count.emplace_back(wordCount.size()); - - lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); - ASSERT_FALSE(testTaskParallel.validation()); - } -} - -TEST(lopatin_i_count_words_mpi, test_3_words) { - boost::mpi::communicator world; - std::vector input; - std::string testString = "three funny words"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector wordCount(1, 0); - - std::shared_ptr taskDataParallel = std::make_shared(); - - if (world.rank() == 0) { - taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataParallel->inputs_count.emplace_back(input.size()); - taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskDataParallel->outputs_count.emplace_back(wordCount.size()); - } - - lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector referenceWordCount(1, 0); - std::shared_ptr taskDataSequential = std::make_shared(); - - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - 
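// TaskData is the framework's type-erased I/O record: inputs holds raw byte
// pointers into caller-owned buffers, inputs_count the matching element counts,
// and outputs/outputs_count name the buffers post_processing() writes back into.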
taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); - - lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordCount[0], referenceWordCount[0]); - } -} - -TEST(lopatin_i_count_words_mpi, test_300_words) { - boost::mpi::communicator world; - std::vector input = lopatin_i_count_words_mpi::generateLongString(20); - std::vector wordCount(1, 0); - - std::shared_ptr taskDataParallel = std::make_shared(); - - if (world.rank() == 0) { - taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataParallel->inputs_count.emplace_back(input.size()); - taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskDataParallel->outputs_count.emplace_back(wordCount.size()); - } - - lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector referenceWordCount(1, 0); - std::shared_ptr taskDataSequential = std::make_shared(); - - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); - - lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordCount[0], referenceWordCount[0]); - } -} - -TEST(lopatin_i_count_words_mpi, test_1500_words) { - boost::mpi::communicator world; - std::vector input = lopatin_i_count_words_mpi::generateLongString(100); - std::vector wordCount(1, 0); - - std::shared_ptr taskDataParallel = std::make_shared(); - - if (world.rank() == 0) { - taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataParallel->inputs_count.emplace_back(input.size()); - taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskDataParallel->outputs_count.emplace_back(wordCount.size()); - } - - lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector referenceWordCount(1, 0); - std::shared_ptr taskDataSequential = std::make_shared(); - - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); - - lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - 
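// The ASSERT below cross-checks the parallel count against a sequential reference
// computed on rank 0. Worth noting: pre_processing() splits the input into
// inputs_count[0] / world.size() chunks and never scatters the remainder, so the
// two results can diverge whenever the dropped tail contains a space; the
// 17-character input here happens to lose no spaces for 1-4 processes.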
ASSERT_EQ(wordCount[0], referenceWordCount[0]); - } -} - -TEST(lopatin_i_count_words_mpi, test_6k_words) { - boost::mpi::communicator world; - std::vector input = lopatin_i_count_words_mpi::generateLongString(400); - std::vector wordCount(1, 0); - - std::shared_ptr taskDataParallel = std::make_shared(); - - if (world.rank() == 0) { - taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataParallel->inputs_count.emplace_back(input.size()); - taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskDataParallel->outputs_count.emplace_back(wordCount.size()); - } - - lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector referenceWordCount(1, 0); - std::shared_ptr taskDataSequential = std::make_shared(); - - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); - - lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordCount[0], referenceWordCount[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp b/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp deleted file mode 100644 index f811a89d29c..00000000000 --- a/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace lopatin_i_count_words_mpi { - -std::vector generateLongString(int n); - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int wordCount{}; - int spaceCount{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - std::vector localInput_; - int wordCount{}; - int spaceCount{}; - int localSpaceCount{}; - boost::mpi::communicator world; -}; - -} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp deleted file mode 100644 index cb9e8d2701d..00000000000 --- a/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp +++ /dev/null @@ -1,72 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" - -std::vector testData = lopatin_i_count_words_mpi::generateLongString(2000); - 
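// generateLongString(n) repeats a fixed 15-word sentence n times, so n = 2000
// should correspond to 2000 * 15 = 30000 words. A minimal sketch of the counting
// rule the tests assume (countWords is a hypothetical helper, not part of the patch):
//   int countWords(const std::vector<char>& s) {
//     int spaces = 0;
//     for (char c : s) spaces += (c == ' ') ? 1 : 0;  // count separators
//     return spaces + 1;                              // words = separators + 1
//   }
// Each repetition also ends in a trailing space, so spaces + 1 comes out to 30001
// on this input, while the asserts below expect 30000; that off-by-one is
// plausibly part of the CI failure this revert cites.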
-TEST(lopatin_i_count_words_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector input = testData; - std::vector wordCount(1, 0); - - std::shared_ptr taskData = std::make_shared(); - - if (world.rank() == 0) { - taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskData->outputs_count.emplace_back(wordCount.size()); - } - - auto testTask = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(wordCount[0], 30000); - } -} - -TEST(lopatin_i_count_words_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector input = testData; - std::vector wordCount(1, 0); - - std::shared_ptr taskData = std::make_shared(); - - if (world.rank() == 0) { - taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); - taskData->outputs_count.emplace_back(wordCount.size()); - } - - auto testTask = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->task_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(wordCount[0], 30000); - } -} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp b/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp deleted file mode 100644 index 2059bd79f3e..00000000000 --- a/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp +++ /dev/null @@ -1,100 +0,0 @@ -#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" - -namespace lopatin_i_count_words_mpi { - -std::vector generateLongString(int n) { - std::vector testData; - std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. 
"; - for (int i = 0; i < n; i++) { - for (unsigned long int j = 0; j < testString.length(); j++) { - testData.push_back(testString[j]); - } - } - return testData; -} - -bool TestMPITaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto* tempPtr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tempPtr[i]; - } - return true; -} - -bool TestMPITaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; -} - -bool TestMPITaskSequential::run() { - internal_order_test(); - for (char c : input_) { - if (c == ' ') { - spaceCount++; - } - } - wordCount = spaceCount + 1; - return true; -} - -bool TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = wordCount; - return true; -} - -bool TestMPITaskParallel::pre_processing() { - internal_order_test(); - unsigned int chunkSize = 0; - if (world.rank() == 0) { - input_ = std ::vector(taskData->inputs_count[0]); - auto* tmpPtr = reinterpret_cast(taskData->inputs[0]); - for (unsigned long int i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmpPtr[i]; - } - chunkSize = taskData->inputs_count[0] / world.size(); - } - boost::mpi::broadcast(world, chunkSize, 0); - - localInput_.resize(chunkSize); - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * chunkSize, chunkSize); - } - localInput_ = std::vector(input_.begin(), input_.begin() + chunkSize); - } else { - world.recv(0, 0, localInput_.data(), chunkSize); - } - return true; -} - -bool TestMPITaskParallel::validation() { - internal_order_test(); - return (world.rank() == 0) ? 
(taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1) : true; -} - -bool TestMPITaskParallel::run() { - internal_order_test(); - for (char c : localInput_) { - if (c == ' ') { - localSpaceCount++; - } - } - boost::mpi::reduce(world, localSpaceCount, spaceCount, std::plus<>(), 0); - if (world.rank() == 0) { - wordCount = spaceCount + 1; - } - return true; -} - -bool TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = wordCount; - } - return true; -} - -} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp deleted file mode 100644 index 962d2dfd001..00000000000 --- a/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp +++ /dev/null @@ -1,97 +0,0 @@ -#include - -#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" - -TEST(lopatin_i_count_words_seq, test_empty_string) { - std::vector input = {}; - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), false); -} - -TEST(lopatin_i_count_words_seq, test_3_words) { - std::vector input; - std::string testString = "three funny words"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 3); -} - -TEST(lopatin_i_count_words_seq, test_300_words) { - std::vector input = lopatin_i_count_words_seq::generateLongString(20); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 300); -} - -TEST(lopatin_i_count_words_seq, test_1500_words) { - std::vector input = lopatin_i_count_words_seq::generateLongString(100); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - 
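// post_processing() is the step that copies the task's internal word counter
// back into taskData->outputs[0], so out[0] only holds the result after it runs.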
testTask.post_processing(); - - ASSERT_EQ(out[0], 1500); -} - -TEST(lopatin_i_count_words_seq, test_6k_words) { - std::vector input = lopatin_i_count_words_seq::generateLongString(400); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 6000); -} \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp b/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp deleted file mode 100644 index 96510732c77..00000000000 --- a/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace lopatin_i_count_words_seq { -std::vector generateLongString(int n); - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int wordCount{}; - int spaceCount{}; -}; - -} // namespace lopatin_i_count_words_seq \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp deleted file mode 100644 index 61b2e1fa450..00000000000 --- a/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include - -#include "core/perf/include/perf.hpp" -#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" - -std::vector testData = lopatin_i_count_words_seq::generateLongString(1000); - -TEST(word_count_seq, test_pipeline_run) { - std::vector input = testData; - std::vector word_count(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); - taskData->outputs_count.emplace_back(word_count.size()); - - auto testTask = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(word_count[0], 15000); -} - -TEST(word_count_seq, test_task_run) { - std::vector input = testData; - std::vector word_count(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - 
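// Here the arithmetic is exact: generateLongString(1000) emits 999 sentences with
// a trailing space plus a final one without, i.e. 999 * 15 + 14 = 14999 spaces,
// and spaces + 1 = 15000 words, the value both perf asserts in this file expect.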
taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); - taskData->outputs_count.emplace_back(word_count.size()); - - auto testTask = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(word_count[0], 15000); -} \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp b/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp deleted file mode 100644 index 823c3cb8c24..00000000000 --- a/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" - -namespace lopatin_i_count_words_seq { - -std::vector generateLongString(int n) { - std::vector testData; - std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. "; - for (int i = 0; i < n - 1; i++) { - for (unsigned long int j = 0; j < testString.length(); j++) { - testData.push_back(testString[j]); - } - } - std::string lastSentence = "This is a long sentence for performance testing of the word count algorithm using MPI."; - for (unsigned long int j = 0; j < lastSentence.length(); j++) { - testData.push_back(lastSentence[j]); - } - return testData; -} - -bool lopatin_i_count_words_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto* tempPtr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tempPtr[i]; - } - return true; -} - -bool lopatin_i_count_words_seq::TestTaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; -} - -bool lopatin_i_count_words_seq::TestTaskSequential::run() { - internal_order_test(); - for (char c : input_) { - if (c == ' ') { - spaceCount++; - } - } - wordCount = spaceCount + 1; - return true; -} - -bool lopatin_i_count_words_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = wordCount; - return true; -} - -} // namespace lopatin_i_count_words_seq \ No newline at end of file From 17935f02844e48027e4c3079484a8ad9e86cb026 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sat, 2 Nov 2024 10:22:29 +0800 Subject: [PATCH 050/155] =?UTF-8?q?Revert=20"=D0=93=D1=80=D1=83=D0=B4?= =?UTF-8?q?=D0=B7=D0=B8=D0=BD=20=D0=9A=D0=BE=D0=BD=D1=81=D1=82=D0=B0=D0=BD?= =?UTF-8?q?=D1=82=D0=B8=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.?= =?UTF-8?q?=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=207.=20=D0=9D?= =?UTF-8?q?=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BD?= =?UTF-8?q?=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5=20=D0=B1=D0=BB=D0=B8?= =?UTF-8?q?=D0=B7=D0=BA=D0=B8=D1=85=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0?= =?UTF-8?q?=D1=87=D0=B5=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4?= =?UTF-8?q?=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82?= 
=?UTF-8?q?=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0."=20(?= =?UTF-8?q?#124)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#54 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11639004346/job/32414587483 --- .../func_tests/main.cpp | 309 ------------------ .../include/ops_mpi.hpp | 48 --- .../perf_tests/main.cpp | 90 ----- .../src/ops_mpi.cpp | 101 ------ .../func_tests/main.cpp | 133 -------- .../include/ops_seq.hpp | 24 -- .../perf_tests/main.cpp | 85 ----- .../src/ops_seq.cpp | 35 -- 8 files changed, 825 deletions(-) delete mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp delete mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp delete mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp delete mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp delete mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp delete mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index 880dd20042f..00000000000 --- a/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include -#include - -#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" - -namespace grudzin_k_nearest_neighbor_elements_mpi { - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = -100 + gen() % 201; - } - return vec; - } - -} // namespace grudzin_k_nearest_neighbor_elements_mpi - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Wrong_Test) { - boost::mpi::communicator world; - std::vector global_vec(1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_10k) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 10000; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - 
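// Buffers are wired up on rank 0 only; the other ranks presumably get their
// slice of global_vec inside pre_processing() and feed a reduce, mirroring the
// scatter-and-reduce pattern used by the other MPI tasks in this series.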
taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_1k) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 1000; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_2k) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 2000; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel 
testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_4k) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 4000; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3k) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 3000; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - 
testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 3; - global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_ans[0], global_ans[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp deleted file mode 100644 index 7591263e048..00000000000 --- a/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace grudzin_k_nearest_neighbor_elements_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - std::pair res{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit 
TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - std::pair res; - size_t size; - size_t start; - boost::mpi::communicator world; -}; - -} // namespace grudzin_k_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index 342f90c6b7c..00000000000 --- a/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" - -TEST(grudzin_k_nearest_neighbor_elements_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 5000000; - global_vec = std::vector(count_size_vector, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, global_ans[0]); - } -} - -TEST(grudzin_k_nearest_neighbor_elements_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 50000000; - global_vec = std::vector(count_size_vector, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = 
std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, global_sum[0]); - } -} diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp deleted file mode 100644 index 496b3ccf9db..00000000000 --- a/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" - -#include -#include -#include - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); - // Init value for output - res = {INT_MAX, -1}; - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { - internal_order_test(); - for (size_t i = 0; i < input_.size() - 1; ++i) { - std::pair tmp = {abs(input_[i] - input_[i + 1]), i}; - res = std::min(res, tmp); - } - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res.second; - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - // Init value for output - res = {INT_MAX, -1}; - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; - } - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { - internal_order_test(); - unsigned int delta = 0; - if (world.rank() == 0) { - delta = (taskData->inputs_count[0]) / world.size(); - size = taskData->inputs_count[0]; - if (taskData->inputs_count[0] % world.size() > 0u) delta++; - } - broadcast(world, delta, 0); - broadcast(world, size, 0); - - if (world.rank() == 0) { - // Init vectors - input_ = std::vector(world.size() * delta + 2, 0); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * delta, delta + 1); - } - } - - local_input_ = std::vector(delta + 1); - start = world.rank() * delta; - if (world.rank() == 0) { - local_input_ = std::vector(input_.begin(), input_.begin() + delta + 1); - } else { - world.recv(0, 0, local_input_.data(), delta + 1); - } - - std::pair local_ans_ = {INT_MAX, -1}; - for (size_t i = 0; i < local_input_.size() - 1 && (i + start) < size - 1; ++i) { - std::pair tmp = {abs(local_input_[i] - local_input_[i + 1]), i + start}; - local_ans_ = std::min(local_ans_, tmp); - } - reduce(world, local_ans_, res, boost::mpi::minimum>(), 0); - return true; -} - -bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { - 
internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res.second; - } - return true; -} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index dfd38c04fa4..00000000000 --- a/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" - -TEST(grudzin_k_nearest_neighbor_elements_seq, Wrong_Test) { - std::vector in = {2}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Lazy) { - // Create data - std::vector in = {2, 3}; - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_24) { - // Create data - std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3}; - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_40) { - // Create data - std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12, - 2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12}; - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - 
testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_60) { - // Create data - std::vector in(100, 0); - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Negative) { - // Create data - std::vector in = {-1, -3, -5, -4, -2}; - std::vector out(1, 0); - int ans = 2; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp deleted file mode 100644 index 9f91d411f6d..00000000000 --- a/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace grudzin_k_nearest_neighbor_elements_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_{}; - std::pair res{}; -}; - -} // namespace grudzin_k_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index 785b6a6321c..00000000000 --- a/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" - -TEST(grudzin_k_nearest_neighbor_elements_seq, test_pipeline_run) { - int size = 10000000; - // Create data - std::vector in(size); - std::vector out(1, 0); - int ans = 3; - for (int i = 0; i < size; ++i) { - in[i] = 3 * i; - } - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create 
Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, out[0]); -} - -TEST(grudzin_k_nearest_neighbor_elements_seq, test_task_run) { - int size = 10000000; - // Create data - std::vector in(size); - std::vector out(1, 0); - int ans = 2; - for (int i = 0; i < size; ++i) { - in[i] = 2 * i; - } - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, out[0]); -} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp deleted file mode 100644 index c661925eb7c..00000000000 --- a/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" - -#include - -bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - // Init value for input and output - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); - // Init value for output - res = {INT_MAX, -1}; - return true; -} - -bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; -} - -bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::run() { - internal_order_test(); - for (size_t i = 0; i < input_.size() - 1; i++) { - res = std::min(res, {abs(input_[i] - input_[i + 1]), i}); - } - return true; -} - -bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res.second; - return true; -} From 
f6d84b18f6442250b3a1bf5254fcf11bc55e5fdc Mon Sep 17 00:00:00 2001
From: Kharin Matvey <133578066+mateusxap@users.noreply.github.com>
Date: Sat, 2 Nov 2024 15:20:30 +0300
Subject: [PATCH 051/155] =?UTF-8?q?=D0=A5=D0=B0=D1=80=D0=B8=D0=BD=20=D0=9C?=
 =?UTF-8?q?=D0=B0=D1=82=D0=B2=D0=B5=D0=B9.=20=D0=97=D0=B0=D0=B4=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?=
 =?UTF-8?q?=2025.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D0=B5=D1=82=20=D1=87?=
 =?UTF-8?q?=D0=B8=D1=81=D0=BB=D0=B0=20=D0=BF=D1=80=D0=B5=D0=B4=D0=BB=D0=BE?=
 =?UTF-8?q?=D0=B6=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=B2=20=D1=81=D1=82=D1=80?=
 =?UTF-8?q?=D0=BE=D0=BA=D0=B5.=20(#68)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Sequential task description**
The sentence count is computed sequentially, character by character. The program examines every character of the text and increments the sentence counter whenever it meets an end-of-sentence mark ('.', '?', '!').

**MPI task description**
The sentence count is computed in parallel: the text is split into fragments that are distributed across several processes with MPI. Each process independently counts the sentences in its own fragment, and the partial results are then summed to obtain the total number of sentences in the whole text.

--- .../func_tests/main.cpp | 228 ++++++++++++++++++ .../include/ops_mpi.hpp | 41 ++++ .../perf_tests/main.cpp | 95 ++++++++ .../src/ops_mpi.cpp | 98 ++++++++ .../func_tests/main.cpp | 81 +++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 82 +++++++ .../src/ops_seq.cpp | 36 +++ 8 files changed, 685 insertions(+) create mode 100644 tasks/mpi/kharin_m_number_of_sentences_mpi/func_tests/main.cpp create mode 100644 tasks/mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp create mode 100644 tasks/mpi/kharin_m_number_of_sentences_mpi/perf_tests/main.cpp create mode 100644 tasks/mpi/kharin_m_number_of_sentences_mpi/src/ops_mpi.cpp create mode 100644 tasks/seq/kharin_m_number_of_sentences_seq/func_tests/main.cpp create mode 100644 tasks/seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp create mode 100644 tasks/seq/kharin_m_number_of_sentences_seq/perf_tests/main.cpp create mode 100644 tasks/seq/kharin_m_number_of_sentences_seq/src/ops_seq.cpp diff --git a/tasks/mpi/kharin_m_number_of_sentences_mpi/func_tests/main.cpp b/tasks/mpi/kharin_m_number_of_sentences_mpi/func_tests/main.cpp new file mode 100644 index 00000000000..6919caba82a --- /dev/null +++ b/tasks/mpi/kharin_m_number_of_sentences_mpi/func_tests/main.cpp @@ -0,0 +1,228 @@
+#include
+
+#include
+#include
+#include
+
+#include "mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp"
+
+TEST(Parallel_Sentences_Count_MPI, Test_Simple_Sentences) {
+ boost::mpi::communicator world;
+ std::string input_text;
+ std::vector sentence_count(1, 0);
+ std::shared_ptr taskDataPar = std::make_shared();
+
+ input_text = "This is sentence one. This is sentence two! Is this sentence three? 
This is sentence four."; + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + kharin_m_number_of_sentences_mpi::CountSentencesParallel countSentencesParallel(taskDataPar); + ASSERT_EQ(countSentencesParallel.validation(), true); + countSentencesParallel.pre_processing(); + countSentencesParallel.run(); + countSentencesParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + // Run sequential version + kharin_m_number_of_sentences_mpi::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + + // Compare results + ASSERT_EQ(reference_count[0], 4); + ASSERT_EQ(reference_count[0], sentence_count[0]); + } +} + +TEST(Parallel_Sentences_Count_MPI, Test_Empty_Text) { + boost::mpi::communicator world; + std::string input_text; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + kharin_m_number_of_sentences_mpi::CountSentencesParallel countSentencesParallel(taskDataPar); + ASSERT_EQ(countSentencesParallel.validation(), true); + countSentencesParallel.pre_processing(); + countSentencesParallel.run(); + countSentencesParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + // Run sequential version + kharin_m_number_of_sentences_mpi::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + + // Compare results + ASSERT_EQ(reference_count[0], 0); + ASSERT_EQ(reference_count[0], sentence_count[0]); + } +} + +TEST(Parallel_Sentences_Count_MPI, Test_Long_Text) { + boost::mpi::communicator world; + std::string input_text; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + for (int i = 0; i < 100; i++) { + input_text += "This is sentence number " + std::to_string(i + 1) + ". 
"; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + kharin_m_number_of_sentences_mpi::CountSentencesParallel countSentencesParallel(taskDataPar); + ASSERT_EQ(countSentencesParallel.validation(), true); + countSentencesParallel.pre_processing(); + countSentencesParallel.run(); + countSentencesParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kharin_m_number_of_sentences_mpi::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + + ASSERT_EQ(reference_count[0], 100); + ASSERT_EQ(reference_count[0], sentence_count[0]); + } +} + +TEST(Parallel_Sentences_Count_MPI, Test_Sentences_with_other_symbols) { + boost::mpi::communicator world; + std::string input_text; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + input_text = "Hi! What's you're name? My name is Matthew. How are you? I'm fine, thank you. And you? I'm also fine."; + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + kharin_m_number_of_sentences_mpi::CountSentencesParallel countSentencesParallel(taskDataPar); + ASSERT_EQ(countSentencesParallel.validation(), true); + countSentencesParallel.pre_processing(); + countSentencesParallel.run(); + countSentencesParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + // Run sequential version + kharin_m_number_of_sentences_mpi::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + + // Compare results + ASSERT_EQ(reference_count[0], 7); + ASSERT_EQ(reference_count[0], sentence_count[0]); + } +} + +TEST(Parallel_Sentences_Count_MPI, Test_Random_Text) { + boost::mpi::communicator world; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + // Генерация случайной строки + char* c_input_text = nullptr; + int text_length = 0; + if (world.rank() == 0) { + std::default_random_engine generator; + std::uniform_int_distribution length_distribution(10, 100); + std::string allowed_chars = 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .?!"; + std::uniform_int_distribution char_distribution(0, allowed_chars.size() - 1); + + // Длина строки + text_length = length_distribution(generator); + c_input_text = new char[text_length + 1]; + + // Заполнение случайными символами + for (int i = 0; i < text_length; i++) { + c_input_text[i] = allowed_chars[char_distribution(generator)]; + } + c_input_text[text_length] = '\0'; + } + + boost::mpi::broadcast(world, text_length, 0); + if (world.rank() != 0) { + c_input_text = new char[text_length + 1]; // Выделение памяти для других процессов + } + boost::mpi::broadcast(world, c_input_text, text_length + 1, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(c_input_text)); + taskDataPar->inputs_count.emplace_back(text_length); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + // Запуск параллельного подсчета + kharin_m_number_of_sentences_mpi::CountSentencesParallel countSentencesParallel(taskDataPar); + ASSERT_EQ(countSentencesParallel.validation(), true); + countSentencesParallel.pre_processing(); + countSentencesParallel.run(); + countSentencesParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(c_input_text)); + taskDataSeq->inputs_count.emplace_back(text_length); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + // Запуск последовательного подсчета для проверки + kharin_m_number_of_sentences_mpi::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + + ASSERT_EQ(reference_count[0], sentence_count[0]); + } + delete[] c_input_text; // Очистка динамической памяти +} \ No newline at end of file diff --git a/tasks/mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp b/tasks/mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp new file mode 100644 index 00000000000..28191b6b852 --- /dev/null +++ b/tasks/mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kharin_m_number_of_sentences_mpi { + +class CountSentencesParallel : public ppc::core::Task { + public: + explicit CountSentencesParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string text, local_text; + int sentence_count{}; + boost::mpi::communicator world; +}; + +class CountSentencesSequential : public ppc::core::Task { + public: + explicit CountSentencesSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string text; + int sentence_count{}; +}; + +} // namespace kharin_m_number_of_sentences_mpi diff --git a/tasks/mpi/kharin_m_number_of_sentences_mpi/perf_tests/main.cpp b/tasks/mpi/kharin_m_number_of_sentences_mpi/perf_tests/main.cpp new file mode 100644 index 00000000000..79849cb6d28 
--- /dev/null +++ b/tasks/mpi/kharin_m_number_of_sentences_mpi/perf_tests/main.cpp @@ -0,0 +1,95 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp" + +TEST(mpi_kharin_m_sentence_count_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::string input_text; + std::vector sentence_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + input_text = "This is a long text with many sentences. "; + for (int i = 0; i < 10000000; i++) { + input_text += "Sentence " + std::to_string(i + 1) + ". "; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + // Проверяем результат (должно быть 10000001 предложение) + ASSERT_EQ(10000001, sentence_count[0]); + } +} + +TEST(mpi_kharin_m_sentence_count_perf_test, test_task_run) { + boost::mpi::communicator world; + std::string input_text; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + input_text = "This is a long text with many sentences. "; + for (int i = 0; i < 10000000; i++) { + input_text += "Sentence " + std::to_string(i + 1) + ". 
"; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataPar->inputs_count.emplace_back(input_text.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataPar->outputs_count.emplace_back(sentence_count.size()); + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + // Проверяем результат (должно быть 10000001 предложение) + ASSERT_EQ(10000001, sentence_count[0]); + } +} diff --git a/tasks/mpi/kharin_m_number_of_sentences_mpi/src/ops_mpi.cpp b/tasks/mpi/kharin_m_number_of_sentences_mpi/src/ops_mpi.cpp new file mode 100644 index 00000000000..b8b624ff0d2 --- /dev/null +++ b/tasks/mpi/kharin_m_number_of_sentences_mpi/src/ops_mpi.cpp @@ -0,0 +1,98 @@ +#include "mpi/kharin_m_number_of_sentences_mpi/include/ops_mpi.hpp" + +#include +#include +#include + +namespace kharin_m_number_of_sentences_mpi { + +bool CountSentencesSequential::pre_processing() { + internal_order_test(); + text = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + sentence_count = 0; + return true; +} + +bool CountSentencesSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool CountSentencesSequential::run() { + internal_order_test(); + for (size_t i = 0; i < text.size(); i++) { + char c = text[i]; + if (c == '.' || c == '?' || c == '!') { + sentence_count++; + } + } + return true; +} + +bool CountSentencesSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = sentence_count; + return true; +} + +bool CountSentencesParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + text = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + } + sentence_count = 0; + return true; +} + +bool CountSentencesParallel::validation() { + internal_order_test(); + return world.rank() == 0 ? taskData->outputs_count[0] == 1 : true; +} + +bool CountSentencesParallel::run() { + internal_order_test(); + int base_part_size = 0; + int remainder = 0; + int text_length = 0; + if (world.rank() == 0) { + text_length = text.size(); + base_part_size = text_length / world.size(); + remainder = text_length % world.size(); + } + + boost::mpi::broadcast(world, base_part_size, 0); + boost::mpi::broadcast(world, remainder, 0); + + // Вычисляем начальную и конечную позиции для каждого процесса + int start = world.rank() * base_part_size + std::min(world.rank(), remainder); + int end = start + base_part_size + (world.rank() < remainder ? 
1 : 0); + + // Каждый процесс создает свою local_text + int delta = end - start; + local_text = std::string(delta, ' '); + copy(reinterpret_cast(taskData->inputs[0]) + start, + reinterpret_cast(taskData->inputs[0]) + end, local_text.begin()); + + // Подсчет предложений в локальной части + int local_count = 0; + for (size_t i = 0; i < local_text.size(); i++) { + char c = local_text[i]; + if (c == '.' || c == '?' || c == '!') { + local_count++; + } + } + + // Суммирование результатов + boost::mpi::reduce(world, local_count, sentence_count, std::plus<>(), 0); + return true; +} + +bool CountSentencesParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = sentence_count; + } + return true; +} + +} // namespace kharin_m_number_of_sentences_mpi \ No newline at end of file diff --git a/tasks/seq/kharin_m_number_of_sentences_seq/func_tests/main.cpp b/tasks/seq/kharin_m_number_of_sentences_seq/func_tests/main.cpp new file mode 100644 index 00000000000..7d5325e2632 --- /dev/null +++ b/tasks/seq/kharin_m_number_of_sentences_seq/func_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include "seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp" + +TEST(Sequential_Sentences_Count, Test_Simple_Sentences) { + std::string input_text = "This is sentence one. This is sentence two! Is this sentence three? This is sentence four."; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + // Run sequential version + kharin_m_number_of_sentences_seq::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + // Compare results + ASSERT_EQ(sentence_count[0], 4); +} + +TEST(Sequential_Sentences_Count, Test_Empty_Text) { + std::string input_text; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + // Run sequential version + kharin_m_number_of_sentences_seq::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + // Compare results + ASSERT_EQ(sentence_count[0], 0); +} + +TEST(Sequential_Sentences_Count, Test_Long_Text) { + std::string input_text; + std::vector sentence_count(1, 0); + + for (int i = 0; i < 100; i++) { + input_text += "This is sentence number " + std::to_string(i + 1) + ". 
"; + } + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + // Run sequential version + kharin_m_number_of_sentences_seq::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + // Compare results + ASSERT_EQ(sentence_count[0], 100); +} + +TEST(Sequential_Sentences_Count, Test_Sentences_with_other_symbols) { + std::string input_text = + "Hi! What's you're name? My name is Matthew. How are you? I'm fine, thank you. And you? I'm also fine."; + std::vector sentence_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + // Run sequential version + kharin_m_number_of_sentences_seq::CountSentencesSequential countSentencesSequential(taskDataSeq); + ASSERT_EQ(countSentencesSequential.validation(), true); + countSentencesSequential.pre_processing(); + countSentencesSequential.run(); + countSentencesSequential.post_processing(); + // Compare results + ASSERT_EQ(sentence_count[0], 7); +} \ No newline at end of file diff --git a/tasks/seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp b/tasks/seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp new file mode 100644 index 00000000000..a1aa37a9d11 --- /dev/null +++ b/tasks/seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kharin_m_number_of_sentences_seq { + +class CountSentencesSequential : public ppc::core::Task { + public: + explicit CountSentencesSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string text; + int sentence_count{}; +}; +} // namespace kharin_m_number_of_sentences_seq diff --git a/tasks/seq/kharin_m_number_of_sentences_seq/perf_tests/main.cpp b/tasks/seq/kharin_m_number_of_sentences_seq/perf_tests/main.cpp new file mode 100644 index 00000000000..97fdbdf41a3 --- /dev/null +++ b/tasks/seq/kharin_m_number_of_sentences_seq/perf_tests/main.cpp @@ -0,0 +1,82 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp" + +TEST(seq_kharin_m_sentence_count_perf_test, test_pipeline_run) { + std::string input_text; + std::vector sentence_count(1, 0); + input_text = "This is a long text with many sentences. "; + for (int i = 0; i < 10000000; i++) { + input_text += "Sentence " + std::to_string(i + 1) + ". 
"; + } + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + + auto testSeqTask = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; // Конвертируем в секунды + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testSeqTask); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10000001, sentence_count[0]); +} + +TEST(seq_kharin_m_sentence_count_perf_test, test_task_run) { + std::string input_text; + std::vector sentence_count(1, 0); + + input_text = "This is a long text with many sentences. "; + for (int i = 0; i < 10000000; i++) { + input_text += "Sentence " + std::to_string(i + 1) + ". "; + } + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(input_text.c_str()))); + taskDataSeq->inputs_count.emplace_back(input_text.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sentence_count.data())); + taskDataSeq->outputs_count.emplace_back(sentence_count.size()); + + auto testSeqTask = std::make_shared(taskDataSeq); + + // Create Perf attributesы + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; // Конвертируем в секунды + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testSeqTask); + + perfAnalyzer->task_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10000001, sentence_count[0]); +} diff --git a/tasks/seq/kharin_m_number_of_sentences_seq/src/ops_seq.cpp b/tasks/seq/kharin_m_number_of_sentences_seq/src/ops_seq.cpp new file mode 100644 index 00000000000..81d830e140a --- /dev/null +++ b/tasks/seq/kharin_m_number_of_sentences_seq/src/ops_seq.cpp @@ -0,0 +1,36 @@ +#include "seq/kharin_m_number_of_sentences_seq/include/ops_seq.hpp" + +#include + +namespace kharin_m_number_of_sentences_seq { + +bool CountSentencesSequential::pre_processing() { + internal_order_test(); + text = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + sentence_count = 0; + return true; +} + +bool CountSentencesSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool CountSentencesSequential::run() { + internal_order_test(); + for (size_t i = 0; i < text.size(); i++) { + char c = text[i]; + if (c == '.' || c == '?' 
|| c == '!') {
+ sentence_count++;
+ }
+ }
+ return true;
+}
+
+bool CountSentencesSequential::post_processing() {
+ internal_order_test();
+ reinterpret_cast(taskData->outputs[0])[0] = sentence_count;
+ return true;
+}
+
+} // namespace kharin_m_number_of_sentences_seq

From b8939606d5aef14134a96ad350b6ea02fc27630c Mon Sep 17 00:00:00 2001
From: VlJulia <129722951+VlJulia@users.noreply.github.com>
Date: Sat, 2 Nov 2024 15:20:41 +0300
Subject: [PATCH 052/155] =?UTF-8?q?=D0=92=D0=BB=D0=B0=D0=B4=D0=B8=D0=BC?=
 =?UTF-8?q?=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=20=D0=AE=D0=BB=D0=B8=D1=8F.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2013.=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8?=
 =?UTF-8?q?=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5?=
 =?UTF-8?q?=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86?=
 =?UTF-8?q?=D1=8B.=20(#70)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The task is to find the maximum value among the elements of the whole matrix.

_Algorithm:_ We work with the flattened matrix (a single vector). The maximum element is found by walking through the vector and comparing the current maximum with each element; if the element is larger, it becomes the new current maximum, and the traversal continues.

**Sequential task description**
The maximum value is found by applying the algorithm to the whole matrix at once.

**MPI task description**
The flattened matrix is divided evenly between the processes; if an even split is impossible, the processes starting from rank 1 each take one extra element of the remainder, so the maximum difference in workload between processes is 1. Every process applies the algorithm to its own part, and the maximum of the partial results is the answer.
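A minimal sketch of this distribution scheme, using the same Boost.MPI primitives as the patch (the free function max_of_flattened and its count_of helper are hypothetical names for illustration; the real code lives in TestMPITaskParallel):

#include <boost/mpi.hpp>

#include <algorithm>
#include <climits>
#include <vector>

// Sketch under the assumptions above: rank 0 owns the flattened matrix,
// every rank gets base = n / size elements, and ranks 1..(n % size) take
// one extra element, so workloads differ by at most one.
int max_of_flattened(const std::vector<int>& flat, boost::mpi::communicator& world) {
  int n = (world.rank() == 0) ? static_cast<int>(flat.size()) : 0;
  boost::mpi::broadcast(world, n, 0);

  const int base = n / world.size();
  const int rem = n % world.size();
  auto count_of = [&](int rank) { return base + (rank >= 1 && rank <= rem ? 1 : 0); };

  std::vector<int> local(count_of(world.rank()));
  if (world.rank() == 0) {
    // Hand each non-root process its contiguous block of the flat vector.
    int offset = count_of(0);
    for (int proc = 1; proc < world.size(); proc++) {
      world.send(proc, 0, flat.data() + offset, count_of(proc));
      offset += count_of(proc);
    }
    std::copy(flat.begin(), flat.begin() + count_of(0), local.begin());
  } else {
    world.recv(0, 0, local.data(), count_of(world.rank()));
  }

  // Local scan, then a max-reduction of the partial results onto rank 0.
  int local_max = INT_MIN;
  for (int v : local) local_max = std::max(local_max, v);

  int global_max = INT_MIN;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
  return global_max;  // meaningful on rank 0 only
}

For an empty matrix every count is zero and the sketch would return INT_MIN; the patch instead rejects that case up front, which is what the Test_ValMatrix_0 functional test below checks by expecting validation() to be false.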
--- .../func_tests/main.cpp | 590 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 ++ .../perf_tests/main.cpp | 136 ++++ .../src/ops_mpi.cpp | 139 +++++ .../func_tests/main.cpp | 400 ++++++++++++ .../include/ops_seq.hpp | 25 + .../perf_tests/main.cpp | 119 ++++ .../src/ops_seq.cpp | 51 ++ 8 files changed, 1508 insertions(+) create mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..a9fb6783b4f --- /dev/null +++ b/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,590 @@ +#include + +#include +#include +#include +#include + +#include "mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" + +std::vector CreateVector(size_t size, size_t spread_of_val) { + // Init value for input and output + std::random_device dev; + std::mt19937 random(dev()); + std::vector v(size); + for (size_t i = 0; i < size; i++) { + v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; + } + return v; +} + +std::vector> CreateInputMatrix(size_t row_c, size_t col_c, size_t spread_of_val) { + std::vector> m(row_c); + for (size_t i = 0; i < row_c; i++) { + m[i] = CreateVector(col_c, spread_of_val); + } + return m; +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_ValMatrix_0) { + const size_t size = 0; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(size, size, spread); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr.data())); + taskDataPar->inputs_count.emplace_back(size); + taskDataPar->inputs_count.emplace_back(size); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_CanCreate_10) { EXPECT_NO_THROW(CreateInputMatrix(10, 10, 10)); } + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_1) { + const size_t size = 1; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(size, size, spread); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(size); + 
taskDataPar->inputs_count.emplace_back(size);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_max(1, global_matr[0][0]);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataSeq->inputs_count.emplace_back(size);
+    taskDataSeq->inputs_count.emplace_back(size);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_max.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_max.size());
+
+    // Create Task
+    vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_max[0], global_max[0]);
+  }
+}
+
+TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_1_2) {
+  const size_t row = 1;
+  const size_t col = 2;
+  const size_t spread = 30;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matr;
+  std::vector<int> global_max(1, -((int)(spread + 10)));
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matr = CreateInputMatrix(row, col, spread);
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataPar->inputs_count.emplace_back(row);
+    taskDataPar->inputs_count.emplace_back(col);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_max(1, global_matr[0][0]);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataSeq->inputs_count.emplace_back(row);
+    taskDataSeq->inputs_count.emplace_back(col);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_max.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_max.size());
+
+    // Create Task
+    vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_max[0], global_max[0]);
+  }
+}
+
+TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_3_1) {
+  const size_t row = 3;
+  const size_t col = 1;
+  const size_t spread = 30;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matr;
+  std::vector<int> global_max(1, -((int)(spread + 10)));
+
+  //
Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(row, col, spread); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_10) { + const size_t size = 10; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(size, size, spread); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(size); + taskDataPar->inputs_count.emplace_back(size); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(reference_max[0], global_max[0]);
+  }
+}
+
+TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_50) {
+  const size_t size = 50;
+  const size_t spread = 30;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matr;
+  std::vector<int> global_max(1, -((int)(spread + 10)));
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matr = CreateInputMatrix(size, size, spread);
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataPar->inputs_count.emplace_back(size);
+    taskDataPar->inputs_count.emplace_back(size);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_max(1, global_matr[0][0]);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataSeq->inputs_count.emplace_back(size);
+    taskDataSeq->inputs_count.emplace_back(size);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_max.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_max.size());
+
+    // Create Task
+    vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_max[0], global_max[0]);
+  }
+}
+
+TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_100) {
+  const size_t size = 100;
+  const size_t spread = 30;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matr;
+  std::vector<int> global_max(1, -((int)(spread + 10)));
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matr = CreateInputMatrix(size, size, spread);
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataPar->inputs_count.emplace_back(size);
+    taskDataPar->inputs_count.emplace_back(size);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_max(1, global_matr[0][0]);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matr.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    taskDataSeq->inputs_count.emplace_back(size);
+    taskDataSeq->inputs_count.emplace_back(size);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_max.data()));
+
taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_10_50) { + const size_t row = 10; + const size_t col = 50; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(row, col, spread); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_100_50) { + const size_t row = 100; + const size_t col = 50; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(row, col, spread); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr 
taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_50_WithSeveralMax) { + const size_t size = 50; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(size, size, spread); + global_matr[0][0] = spread; + global_matr[5][25] = spread; + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(size); + taskDataPar->inputs_count.emplace_back(size); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_100_50_WithSeveralMax) { + const size_t row = 100; + const size_t col = 50; + const size_t spread = 30; + + boost::mpi::communicator world; + std::vector> global_matr; + std::vector global_max(1, -((int)(spread + 10))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = CreateInputMatrix(row, col, spread); + global_matr[25][10] = spread; + global_matr[5][25] = spread; + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matr[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..4db9e016444 --- /dev/null +++ b/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace vladimirova_j_max_of_vector_elements_mpi { + +int FindMaxElem(std::vector m); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace vladimirova_j_max_of_vector_elements_mpi diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..c1688d3cca9 --- /dev/null +++ b/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,136 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" + +std::vector CreateVector(size_t size, size_t spread_of_val) { + // Init value for input and output + std::random_device dev; + std::mt19937 random(dev()); + std::vector v(size); + for (size_t i = 0; i < size; i++) { + v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; + } + return v; +} + +std::vector> CreateInputMatrix(size_t row_c, size_t col_c, size_t spread_of_val) { + std::vector> m(row_c); + for (size_t i = 0; i < row_c; i++) { + 
m[i] = CreateVector(col_c, spread_of_val); + } + return m; +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, test_pipeline_run) { + int row = 7000; + int col = 7000; + int spread = 7000; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)spread + 10)); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 random(dev()); + + global_matrix = CreateInputMatrix(row, col, spread); + int some_row = random() % row; + int some_column = random() % col; + global_matrix[some_row][some_column] = spread; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(spread, global_max[0]); + } +} + +TEST(vladimirova_j_max_of_vector_elements_mpi, test_task_run) { + int row = 7000; + int col = 7000; + int spread = 7000; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)spread + 10)); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 random(dev()); + + global_matrix = CreateInputMatrix(row, col, spread); + int some_row = random() % row; + int some_column = random() % col; + global_matrix[some_row][some_column] = spread; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(row); + taskDataPar->inputs_count.emplace_back(col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(spread, global_max[0]); + } +} diff --git 
a/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..d4562910623 --- /dev/null +++ b/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,139 @@ +#include "mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +int vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(std::vector m) { + if (m.empty()) return INT_MIN; + int max_elem = m[0]; + for (int &i : m) { + if (i > max_elem) { + max_elem = i; + } + } + return max_elem; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + input_ = std::vector(taskData->inputs_count[0] * taskData->inputs_count[1]); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto *input_data = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i * taskData->inputs_count[1] + j] = input_data[j]; + } + } + return true; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return (taskData->inputs_count[0] > 0) && (taskData->inputs_count[1] > 0) && (taskData->outputs_count[0] == 1); +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(input_); + return true; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + unsigned int rows = taskData->inputs_count[0]; + unsigned int columns = taskData->inputs_count[1]; + + input_ = std::vector(rows * columns); + + for (unsigned int i = 0; i < rows; i++) { + auto *input_data = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < columns; j++) { + input_[i * columns + j] = input_data[j]; + } + } + } + + return true; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + return (world.rank() != 0) || + ((taskData->outputs_count[0] == 1) && (taskData->inputs_count[0] > 0) && (!taskData->inputs.empty())); +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + + if (world.rank() == 0) { + // Init vectors + + unsigned int rows = taskData->inputs_count[0]; + unsigned int columns = taskData->inputs_count[1]; + + delta = columns * rows / world.size(); + int div_r = columns * rows % world.size() + 1; + + if (delta == 0) { + for (int i = 1; i < world.size(); i++) { + world.send(i, 0, 0); + } + local_input_ = std::vector(input_.begin(), input_.begin() + div_r - 1); + res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(local_input_); + return true; + } + for (int i = 1; i < world.size(); i++) { + world.send(i, 0, delta + (int)(i < div_r)); + } + + for (int i = 1; i < div_r; i++) { + world.send(i, 0, input_.data() + delta * i + i - 1, delta + 1); + } + for (int i = div_r; i < world.size(); i++) { + world.send(i, 0, input_.data() + delta * i + div_r - 1, delta); + } + + local_input_ = std::vector(input_.begin(), 
input_.begin() + delta); + } + + if (world.rank() != 0) { + world.recv(0, 0, delta); + if (delta == 0) return true; + local_input_ = std::vector(delta); + world.recv(0, 0, local_input_.data(), delta); + } + + int local_res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(local_input_); + reduce(world, local_res, res, boost::mpi::maximum(), 0); + + return true; +} + +bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..4b9615e38ff --- /dev/null +++ b/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,400 @@ +#include + +#include +#include + +#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp" + +std::vector CreateVector(size_t size, size_t spread_of_val) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector v(size); + for (size_t i = 0; i < size; i++) { + v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; + } + return v; +} + +std::vector> CreateInputMatrix(size_t row_c, size_t column_c, size_t spread_of_val) { + // Init value for input and output + std::vector> m(row_c); + for (size_t i = 0; i < row_c; i++) { + m[i] = CreateVector(column_c, spread_of_val); + } + return m; +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_ValMatrix_0) { + const size_t size = 0; + const int spread = 10; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), false); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_CanCreate_10) { + const size_t col = 10; + const size_t row = 10; + const int spread = 10; + EXPECT_NO_THROW(CreateInputMatrix(row, col, spread)); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_1) { + const size_t size = 1; + const int spread = 10; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + in[0][0] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + 
TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_1_2) { + const size_t col = 1; + const size_t row = 2; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(row, col, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % row; + in[some_row][0] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_3_1) { + const size_t col = 3; + const size_t row = 1; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(row, col, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_col = random() % col; + in[0][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_10) { + const size_t size = 10; + const int spread = 10; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % size; + int some_col = random() % size; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + 
+TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_20) { + const size_t size = 20; + const int spread = 50; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % size; + int some_col = random() % size; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_50) { + const size_t size = 50; + const int spread = 50; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % size; + int some_col = random() % size; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_100) { + const size_t size = 100; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % size; + int some_col = random() % size; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_100_WithSeveralMax) { + 
const size_t size = 100; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(size, size, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % size; + int some_col = random() % size; + in[some_row][some_col] = spread; + some_row = random() % size; + some_col = random() % size; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_100_50_WithSeveralMax) { + const size_t col = 100; + const size_t row = 50; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -110); + std::vector> in = CreateInputMatrix(row, col, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % row; + int some_col = random() % col; + in[some_row][some_col] = spread; + some_row = random() % row; + some_col = random() % col; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} + +TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_100_50) { + const size_t col = 100; + const size_t row = 50; + const int spread = 100; // spread is excepted answer + + // Create data + std::vector out(1, -((int)spread + 10)); + std::vector> in = CreateInputMatrix(row, col, spread); + + std::random_device dev; + std::mt19937 random(dev()); + int some_row = random() % row; + int some_col = random() % col; + in[some_row][some_col] = spread; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(row); + taskDataSeq->inputs_count.emplace_back(col); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + 
TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + ASSERT_EQ(spread, out[0]); +} diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..35f46742500 --- /dev/null +++ b/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace vladimirova_j_max_of_vector_elements_seq { + +int FindMaxElem(std::vector m); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; + std::vector input_; +}; + +} // namespace vladimirova_j_max_of_vector_elements_seq diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..b4dce9a9f6e --- /dev/null +++ b/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,119 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp" + +std::vector CreateVector(size_t size, size_t spread_of_val) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector v(size); + for (size_t i = 0; i < size; i++) { + v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; + } + return v; +} + +std::vector> CreateInputMatrix(size_t row_c, size_t column_c, size_t spread_of_val) { + // Init value for input and output + std::vector> m(row_c); + for (size_t i = 0; i < row_c; i++) { + m[i] = CreateVector(column_c, spread_of_val); + } + return m; +} + +TEST(vladimirova_j_max_of_vector_elements_seq, test_pipeline_run) { + std::random_device dev; + std::mt19937 random(dev()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + int size = 7000; + int spread = 7000; + + std::vector> matrix_in; + matrix_in = CreateInputMatrix(size, size, spread); + std::vector out(1, matrix_in[0][0]); + + int some_row = random() % size; + int some_col = random() % size; + matrix_in[some_row][some_col] = spread; + + for (unsigned int i = 0; i < matrix_in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix_in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(spread, out[0]); +} + 
+TEST(vladimirova_j_max_of_vector_elements_seq, test_task_run) {
+  std::random_device dev;
+  std::mt19937 random(dev());
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  int size = 7000;
+  int spread = 7000;
+  std::vector<std::vector<int>> matrix_in;
+  matrix_in = CreateInputMatrix(size, size, spread);
+  std::vector<int> out(1, matrix_in[0][0]);
+
+  int some_row = random() % size;
+  int some_col = random() % size;
+  matrix_in[some_row][some_col] = spread;
+
+  for (unsigned int i = 0; i < matrix_in.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix_in[i].data()));
+  taskDataSeq->inputs_count.emplace_back(size);
+  taskDataSeq->inputs_count.emplace_back(size);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential = std::make_shared<vladimirova_j_max_of_vector_elements_seq::TestTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(spread, out[0]);
+}
diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..8f46c9a5826
--- /dev/null
+++ b/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp
@@ -0,0 +1,51 @@
+#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp"
+
+#include <climits>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+int vladimirova_j_max_of_vector_elements_seq::FindMaxElem(std::vector<int> m) {
+  if (m.empty()) return INT_MIN;
+  int max_elem = m[0];
+  for (int& i : m) {
+    if (i > max_elem) {
+      max_elem = i;
+    }
+  }
+  return max_elem;
+}
+
+bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+
+  input_ = std::vector<int>(taskData->inputs_count[0] * taskData->inputs_count[1]);
+
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    auto* input_data = reinterpret_cast<int*>(taskData->inputs[i]);
+    for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) {
+      input_[i * taskData->inputs_count[1] + j] = input_data[j];
+    }
+  }
+  return true;
+}
+
+bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::validation() {
+  internal_order_test();
+
+  return ((taskData->inputs_count[0] > 0) && (taskData->inputs_count[1] > 0)) && (taskData->outputs_count[0] == 1);
+}
+
+bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::run() {
+  internal_order_test();
+
+  res = vladimirova_j_max_of_vector_elements_seq::FindMaxElem(input_);
+  return true;
+}
+
+bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}
From 24dfe884d7183b325d89ec068a9b8dc2a5ac1687 Mon Sep 17 00:00:00 2001
From: Irina2004-tech <111091810+Irina2004-tech@users.noreply.github.com>
Date: Sat, 2 Nov 2024 15:21:21 +0300
Subject: [PATCH 053/155]
 Kudryashova Irina. Task 1. Variant 9. Dot product
 of vectors. (#69)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential task description
The task is to compute the dot product of two vectors. The algorithm
multiplies the corresponding elements of the two vectors pairwise and then
sums the resulting products.

MPI task description
The parallel task runs on several MPI processes. At the start we have a
two-dimensional vector of numbers that holds the two input vectors. The
vectors are split into segments according to the number of processes, and
each segment is sent to its own process for further handling. Each process
receives its segment of the data, multiplies the vector elements pairwise,
and sums the results. The partial sums from all processes are then gathered
on the root process, where they are added together to produce the final
answer.
---
 .../vectorDotProductMPIFuncTests.cpp          | 280 ++++++++++++++++++
 .../include/vectorDotProductMPI.hpp           |  36 +++
 .../vectorDotProductMPIPerfTests.cpp          |  90 ++++++
 .../src/vectorDotProductMPI.cpp               | 122 ++++++++
 .../vectorDotProductSeqFuncTests.cpp          | 144 +++++++++
 .../include/vectorDotProductSeq.hpp           |  19 ++
 .../vectorDotProductSeqPerfTests.cpp          |  70 +++++
 .../src/vectorDotProductSeq.cpp               |  46 +++
 8 files changed, 807 insertions(+)
 create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp
 create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp
 create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp
 create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp
 create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp
 create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp
 create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp
 create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp

diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp
new file mode 100644
index 00000000000..5beb915e059
--- /dev/null
+++ b/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp
@@ -0,0 +1,280 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp"
+
+static int seedOffset = 0;
+
+std::vector<int> GetRandomVector(int size) {
+  std::vector<int> vector(size);
+  std::srand(static_cast<unsigned>(time(nullptr)) + ++seedOffset);
+  for (int i = 0; i < size; ++i) {
+    vector[i] = std::rand() % 100 + 1;
+  }
+  return vector;
+}
+
+TEST(kudryashova_i_vector_dot_product_mpi, mpi_vectorDotProduct) {
+  std::vector<int> vector1 = {8, 7, 6};
+  std::vector<int> vector2 = {3, 2, 1};
+  ASSERT_EQ(44, kudryashova_i_vector_dot_product_mpi::vectorDotProduct(vector1, vector2));
+}
+
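A minimal sketch of the per-segment kernel the description above assumes
(the helper name partial_dot is hypothetical, not taken from this patch):

    #include <cstddef>
    #include <vector>

    // Each rank multiplies its pair of segments element by element and
    // accumulates a partial sum; the root process then adds the partial
    // sums from all ranks to obtain the full dot product.
    int partial_dot(const std::vector<int>& a, const std::vector<int>& b) {
      int sum = 0;
      for (std::size_t i = 0; i < a.size() && i < b.size(); ++i) {
        sum += a[i] * b[i];
      }
      return sum;
    }

For {8, 7, 6} and {3, 2, 1} this gives 24 + 14 + 6 = 44, which matches the
assertion in the mpi_vectorDotProduct test above.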
+TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_120) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + ASSERT_EQ(reference[0], result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_360) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 360; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + ASSERT_EQ(reference[0], result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_equal) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_not_equal_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector + 1); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_dot_product) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + 
+TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {}; + std::vector vector2 = {}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_and_nonempty_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {}; + std::vector vector2 = {1}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_1_with_zero) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {0}; + std::vector vector2 = {1}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_1) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 1; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + 
} + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + ASSERT_EQ(reference[0], result[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp b/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp new file mode 100644 index 00000000000..4f85f4ee8a6 --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp @@ -0,0 +1,36 @@ +#pragma once +#include +#include +#include + +#include "core/task/include/task.hpp" +namespace kudryashova_i_vector_dot_product_mpi { +int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int result{}; +}; +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input1_, local_input2_; + int result{}; + boost::mpi::communicator world; + unsigned int delta; +}; +} // namespace kudryashova_i_vector_dot_product_mpi diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp new file mode 100644 index 00000000000..a05b89e1e4e --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp @@ -0,0 +1,90 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" + +static int seedOffset = 0; +std::vector GetRandomVector(int size) { + std::vector vector(size); + std::srand(static_cast(time(nullptr)) + ++seedOffset); + for (int i = 0; i < size; ++i) { + vector[i] = std::rand() % 100 + 1; + } + return vector; +} + +TEST(kudryashova_i_vector_dot_product_mpi, test_pipeline_run) { + const int count = 15000000; + boost::mpi::communicator world; + std::vector> global_vector; + std::vector vector1 = 
GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, test_task_run) { + const int count_size_vector = 15000000; + boost::mpi::communicator world; + std::vector> global_vector; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + std::vector result(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + } +} diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp new file mode 100644 index 00000000000..cf9e1f0c150 --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp @@ -0,0 +1,122 @@ +#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" + +#include +#include + +int kudryashova_i_vector_dot_product_mpi::vectorDotProduct(const std::vector& vector1, + const std::vector& vector2) { + long 
long result = 0;
+  for (unsigned long i = 0; i < vector1.size(); i++) result += vector1[i] * vector2[i];
+  return result;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+
+  input_.resize(taskData->inputs.size());
+  for (unsigned long i = 0; i < input_.size(); ++i) {
+    auto* tempPtr = reinterpret_cast<int*>(taskData->inputs[i]);
+    input_[i] = std::vector<int>(taskData->inputs_count[i]);
+    std::copy(tempPtr, tempPtr + taskData->inputs_count[i], input_[i].begin());
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+         (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+         taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) &&
+         taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (unsigned long i = 0; i < input_[0].size(); i++) {
+    result += input_[1][i] * input_[0][i];
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[0] / world.size();
+    if ((int)(taskData->inputs_count[0]) < world.size()) {
+      delta = taskData->inputs_count[0];
+    }
+  }
+  if (world.rank() == 0) {
+    input_.resize(taskData->inputs.size());
+    for (size_t i = 0; i < taskData->inputs.size(); ++i) {
+      if (taskData->inputs[i] == nullptr || taskData->inputs_count[i] == 0) {
+        return false;
+      }
+      input_[i].resize(taskData->inputs_count[i]);
+      int* source_ptr = reinterpret_cast<int*>(taskData->inputs[i]);
+
+      std::copy(source_ptr, source_ptr + taskData->inputs_count[i], input_[i].begin());
+    }
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return (taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+           (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+           taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) &&
+           taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0;
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  broadcast(world, delta, 0);
+  if (world.rank() == 0) {
+    for (int proc = 1; proc < world.size(); ++proc) {
+      world.send(proc, 0, input_[0].data() + proc * delta, delta);
+      world.send(proc, 1, input_[1].data() + proc * delta, delta);
+    }
+  }
+  local_input1_.resize(delta);
+  local_input2_.resize(delta);
+  if (world.rank() == 0) {
+    std::copy(input_[0].begin(), input_[0].begin() + delta, local_input1_.begin());
+    std::copy(input_[1].begin(), input_[1].begin() + delta, local_input2_.begin());
+  } else {
+    world.recv(0, 0, local_input1_.data(), delta);
+    world.recv(0, 1, local_input2_.data(), delta);
+  }
+  int local_result = std::inner_product(local_input1_.begin(), local_input1_.end(), local_input2_.begin(), 0);
+  std::vector<int> full_results;
+  gather(world, local_result, full_results, 0);
+
+  if (world.rank() == 0) {
+    result = std::accumulate(full_results.begin(), full_results.end(), 0);
+  }
+  if (world.rank() == 0 && (int)(taskData->inputs_count[0]) < world.size()) {
+    result = std::inner_product(input_[0].begin(), input_[0].end(), input_[1].begin(), 0);
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if (!taskData->outputs.empty()) {
+      reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp
new file mode 100644
index 00000000000..8565b68475e
--- /dev/null
+++ b/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp
@@ -0,0 +1,144 @@
+#include <gtest/gtest.h>
+
+#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp"
+
+static int seedOffset = 0;
+std::vector<int> GetRandomVector(int size) {
+  std::vector<int> vector(size);
+  std::srand(static_cast<unsigned>(time(nullptr)) + ++seedOffset);
+  for (int i = 0; i < size; ++i) {
+    vector[i] = std::rand() % 100 + 1;
+  }
+  return vector;
+}
+
+TEST(kudryashova_i_vector_dot_product_seq, check_vectorDotProduct) {
+  // Create data
+  std::vector<int> vector1 = {1, 8, 14};
+  std::vector<int> vector2 = {3, 6, 5};
+  ASSERT_EQ(121, kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2));
+}
+
+TEST(kudryashova_i_vector_dot_product_seq, scalar_multiply_vector_size_50) {
+  const int count = 50;
+  // Create data
+  std::vector<int> vector1 = GetRandomVector(count);
+  std::vector<int> vector2 = GetRandomVector(count);
+  std::vector<int> out(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector1.data()));
+  taskDataSeq->inputs_count.emplace_back(vector1.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector2.data()));
+  taskDataSeq->inputs_count.emplace_back(vector2.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+  // Create Task
+  kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]);
+}
+
+TEST(kudryashova_i_vector_dot_product_seq, scalar_multiply_vector_size_120) {
+  const int count = 120;
+  // Create data
+  std::vector<int> out(1, 0);
+  std::vector<int> vector1 = GetRandomVector(count);
+  std::vector<int> vector2 = GetRandomVector(count);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector1.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector2.data()));
+  taskDataSeq->inputs_count.emplace_back(vector1.size());
+  taskDataSeq->inputs_count.emplace_back(vector2.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+  // Create Task
+  kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_equal_vectors) { + const int count = 10; + // Create data + std::vector out(1, 0); + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(kudryashova_i_vector_dot_product_seq, checks_not_equal_vector) { + const int count = 10; + // Create data + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count + 1); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_empty_vectors) { + // Create data + std::vector vector1 = {}; + std::vector vector2 = {}; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_run) { + // Create data + std::vector out(1, 0); + std::vector vector1 = {1, 8, 14}; + std::vector vector2 = {3, 6, 5}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(121, out[0]); +} \ No 
newline at end of file diff --git a/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp b/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp new file mode 100644 index 00000000000..7e7e7a07a02 --- /dev/null +++ b/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp @@ -0,0 +1,19 @@ +#pragma once +#include + +#include "core/task/include/task.hpp" +namespace kudryashova_i_vector_dot_product { +int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_{}; + int result{}; +}; +} // namespace kudryashova_i_vector_dot_product diff --git a/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp new file mode 100644 index 00000000000..6a6e3051f7c --- /dev/null +++ b/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp @@ -0,0 +1,70 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp" + +static int seedOffset = 0; +std::vector GetRandomVector(int size) { + std::vector vector(size); + std::srand(static_cast(time(nullptr)) + ++seedOffset); + for (int i = 0; i < size; ++i) { + vector[i] = std::rand() % 100 + 1; + } + return vector; +} + +TEST(kudryashova_i_vector_dot_product_seq, test_pipeline_run) { + const int count_size = 15000000; + std::vector vector1 = GetRandomVector(count_size); + std::vector vector2 = GetRandomVector(count_size); + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, test_task_run) { + const int count = 15000000; + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + 
taskDataSeq->inputs_count.emplace_back(vector2.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+  auto testTaskSequential = std::make_shared<kudryashova_i_vector_dot_product::TestTaskSequential>(taskDataSeq);
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp b/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp
new file mode 100644
index 00000000000..f8d3c5cbfcc
--- /dev/null
+++ b/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp
@@ -0,0 +1,46 @@
+#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp"
+
+#include <algorithm>
+
+int kudryashova_i_vector_dot_product::vectorDotProduct(const std::vector<int>& vector1,
+                                                       const std::vector<int>& vector2) {
+  long long result = 0;
+  for (unsigned long i = 0; i < vector1.size(); ++i) {
+    result += vector1[i] * vector2[i];
+  }
+  return result;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::pre_processing() {
+  internal_order_test();
+
+  input_.resize(taskData->inputs.size());
+  for (unsigned long i = 0; i < input_.size(); ++i) {
+    auto* tempPtr = reinterpret_cast<int*>(taskData->inputs[i]);
+    input_[i] = std::vector<int>(taskData->inputs_count[i]);
+    std::copy(tempPtr, tempPtr + taskData->inputs_count[i], input_[i].begin());
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+         (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+         taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) &&
+         taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::run() {
+  internal_order_test();
+  for (unsigned long i = 0; i < input_[0].size(); i++) {
+    result += input_[1][i] * input_[0][i];
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
\ No newline at end of file
From 1255bf4440a4514dac45b0e849130f5fd8b227b9 Mon Sep 17 00:00:00 2001
From: VasenkovAA <114131027+VasenkovAA@users.noreply.github.com>
Date: Sat, 2 Nov 2024 15:21:42 +0300
Subject: [PATCH 054/155] Vasenkov Andrey. Task 1. Variant 23. Counting the
 frequency of a character in a string (#51)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task. The input is a string and the character
whose number of occurrences has to be found; the output is a single integer,
the number of times that character appears in the string. For simplicity and
readability the count is computed with std::count from the standard library.

Description of the parallel (MPI) task. The string is split evenly into
several substrings, one per process. Each process then counts the occurrences
of the target character in its own part of the string, and the partial counts
are summed on the root process with reduce. A stand-alone sketch of this
scheme follows.
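As a rough illustration of the split/count/reduce scheme just described, here
is a minimal self-contained Boost.MPI sketch of the same pattern outside the
ppc task framework. It is not code from this patch: the function name
count_char_parallel and all other identifiers are illustrative, and the even
split is written out explicitly with size and displacement vectors so the
bookkeeping is visible.

// Illustrative sketch only; assumes Boost.MPI. Build with mpic++ and -lboost_mpi,
// run under mpirun with any number of processes.
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>

#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Counts occurrences of `target` in `str` (str need only be valid on rank 0);
// the returned total is meaningful only on rank 0.
int count_char_parallel(const boost::mpi::communicator& world, const std::string& str, char target) {
  int n = (world.rank() == 0) ? static_cast<int>(str.size()) : 0;
  boost::mpi::broadcast(world, n, 0);
  boost::mpi::broadcast(world, target, 0);

  // Even split: the first n % size ranks take one extra character.
  std::vector<int> counts(world.size(), n / world.size());
  for (int r = 0; r < n % world.size(); ++r) counts[r]++;
  std::vector<int> displs(world.size(), 0);
  for (int r = 1; r < world.size(); ++r) displs[r] = displs[r - 1] + counts[r - 1];

  // Distribute each rank's slice of the string.
  std::vector<char> local(counts[world.rank()]);
  boost::mpi::scatterv(world, str.data(), counts, displs, local.data(), counts[world.rank()], 0);

  // Local count, then sum the partial counts on rank 0.
  int local_count = static_cast<int>(std::count(local.begin(), local.end(), target));
  int total = 0;
  boost::mpi::reduce(world, local_count, total, std::plus<int>(), 0);
  return total;
}

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  std::string s = (world.rank() == 0) ? "abracadabra" : "";
  int total = count_char_parallel(world, s, 'a');
  if (world.rank() == 0) std::cout << total << std::endl;  // prints 5
}

This is the same size/displacement computation the patch performs in
CharFrequencyParallel::pre_processing before calling scatterv, just reduced
to a single free function.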
---
 .../vasenkov_a_char_freq/func_tests/main.cpp  | 467 ++++++++++++++++++
 .../vasenkov_a_char_freq/include/ops_mpi.hpp  |  51 ++
 .../vasenkov_a_char_freq/perf_tests/main.cpp  |  88 ++++
 .../mpi/vasenkov_a_char_freq/src/ops_mpi.cpp  | 100 ++++
 .../vasenkov_a_char_freq/func_tests/main.cpp  | 282 +++++++++++
 .../vasenkov_a_char_freq/include/ops_seq.hpp  |  24 +
 .../vasenkov_a_char_freq/perf_tests/main.cpp  |  80 +++
 .../seq/vasenkov_a_char_freq/src/ops_seq.cpp  |  32 ++
 8 files changed, 1124 insertions(+)
 create mode 100644 tasks/mpi/vasenkov_a_char_freq/func_tests/main.cpp
 create mode 100644 tasks/mpi/vasenkov_a_char_freq/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/vasenkov_a_char_freq/perf_tests/main.cpp
 create mode 100644 tasks/mpi/vasenkov_a_char_freq/src/ops_mpi.cpp
 create mode 100644 tasks/seq/vasenkov_a_char_freq/func_tests/main.cpp
 create mode 100644 tasks/seq/vasenkov_a_char_freq/include/ops_seq.hpp
 create mode 100644 tasks/seq/vasenkov_a_char_freq/perf_tests/main.cpp
 create mode 100644 tasks/seq/vasenkov_a_char_freq/src/ops_seq.cpp

diff --git a/tasks/mpi/vasenkov_a_char_freq/func_tests/main.cpp b/tasks/mpi/vasenkov_a_char_freq/func_tests/main.cpp
new file mode 100644
index 00000000000..9d0f4301e8f
--- /dev/null
+++ b/tasks/mpi/vasenkov_a_char_freq/func_tests/main.cpp
@@ -0,0 +1,467 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/vasenkov_a_char_freq/include/ops_mpi.hpp"
+
+TEST(vasenkov_a_char_freq_mpi, test_all_same_characters) {
+  boost::mpi::communicator world;
+  std::vector<char> global_str;
+  std::vector<int> global_count(1, 0);
+  char target_char = 'a';
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_str = 240;
+    global_str = std::vector<char>(count_size_str, 'a');
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_str.data()));
+    taskDataPar->inputs_count.emplace_back(global_str.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&target_char));
+    taskDataPar->inputs_count.emplace_back(1);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_count.data()));
+    taskDataPar->outputs_count.emplace_back(global_count.size());
+  }
+
+  vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> reference_count(1, 0);
+
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_str.data()));
+    taskDataSeq->inputs_count.emplace_back(global_str.size());
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&target_char));
+    taskDataSeq->inputs_count.emplace_back(1);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_count.data()));
taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + vasenkov_a_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_no_occurrences) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + vasenkov_a_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_mixed_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'b'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + for (int i = 0; i < count_size_str; i += 3) { + global_str[i] = 'b'; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + vasenkov_a_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_1) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(1, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_2) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + 
ASSERT_EQ(global_count[0], 1); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_3) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_5) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'd', 'i'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_7) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'a', 'd', 'i', 'g'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 2); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_1_no_target_char) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'b'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(1, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + 
taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_5_with_multiple_target_chars) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = {'a', 'b', 'a', 'c', 'a'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 3); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_100_with_no_target_char) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(100, 'x'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_150_all_target_chars) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'y'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(150, 'y'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + 
testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 150); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_string_length_150_half_target_chars) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(75, 'a'); + global_str.insert(global_str.end(), 75, 'b'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + vasenkov_a_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 75); + } +} diff --git a/tasks/mpi/vasenkov_a_char_freq/include/ops_mpi.hpp b/tasks/mpi/vasenkov_a_char_freq/include/ops_mpi.hpp new file mode 100644 index 00000000000..df50a5100bd --- /dev/null +++ b/tasks/mpi/vasenkov_a_char_freq/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace vasenkov_a_char_freq_mpi { + +class CharFrequencySequential : public ppc::core::Task { + public: + explicit CharFrequencySequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector str_input_; + char target_char_; + int res{}; +}; + +class CharFrequencyParallel : public ppc::core::Task { + public: + explicit CharFrequencyParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector str_input_; + std::vector local_input_; + char target_char_; + int res{}; + int local_res{}; + + boost::mpi::communicator world; +}; + +} // namespace vasenkov_a_char_freq_mpi diff --git a/tasks/mpi/vasenkov_a_char_freq/perf_tests/main.cpp b/tasks/mpi/vasenkov_a_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..6ff7a3e6bfc --- /dev/null +++ b/tasks/mpi/vasenkov_a_char_freq/perf_tests/main.cpp @@ -0,0 +1,88 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/vasenkov_a_char_freq/include/ops_mpi.hpp" + +TEST(vasenkov_a_char_freq_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 150000000; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} + +TEST(vasenkov_a_char_freq_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 150000000; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} diff --git a/tasks/mpi/vasenkov_a_char_freq/src/ops_mpi.cpp b/tasks/mpi/vasenkov_a_char_freq/src/ops_mpi.cpp new file mode 100644 index 00000000000..9c036aa8161 --- /dev/null +++ b/tasks/mpi/vasenkov_a_char_freq/src/ops_mpi.cpp @@ -0,0 +1,100 @@ +#include "mpi/vasenkov_a_char_freq/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool vasenkov_a_char_freq_mpi::CharFrequencySequential::pre_processing() { + internal_order_test(); + + str_input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], str_input_.begin()); + + target_char_ = *reinterpret_cast(taskData->inputs[1]); + res = 0; + return true; +} + +bool vasenkov_a_char_freq_mpi::CharFrequencySequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool vasenkov_a_char_freq_mpi::CharFrequencySequential::run() { + internal_order_test(); + + res = std::count(str_input_.begin(), str_input_.end(), target_char_); + return true; +} + +bool vasenkov_a_char_freq_mpi::CharFrequencySequential::post_processing() { + reinterpret_cast(taskData->outputs[0])[0] = res; + return 
true;
+}
+
+bool vasenkov_a_char_freq_mpi::CharFrequencyParallel::pre_processing() {
+  internal_order_test();
+  int myid = world.rank();
+  int world_size = world.size();
+  unsigned int n = 0;
+
+  if (myid == 0) {
+    n = taskData->inputs_count[0];
+    str_input_ = std::vector<char>(taskData->inputs[0], taskData->inputs[0] + n);
+    target_char_ = *reinterpret_cast<char*>(taskData->inputs[1]);
+  }
+
+  boost::mpi::broadcast(world, n, 0);
+  boost::mpi::broadcast(world, target_char_, 0);
+
+  unsigned int vec_send_size = n / world_size;
+  unsigned int overflow_size = n % world_size;
+
+  std::vector<int> send_counts(world_size, vec_send_size + (overflow_size > 0 ? 1 : 0));
+  std::vector<int> displs(world_size, 0);
+
+  for (unsigned int i = 1; i < static_cast<unsigned int>(world_size); ++i) {
+    if (i >= overflow_size) send_counts[i] = vec_send_size;
+    displs[i] = displs[i - 1] + send_counts[i - 1];
+  }
+
+  local_input_.resize(send_counts[myid]);
+  boost::mpi::scatterv(world, str_input_.data(), send_counts, displs, local_input_.data(), send_counts[myid], 0);
+
+  return true;
+}
+
+bool vasenkov_a_char_freq_mpi::CharFrequencyParallel::run() {
+  internal_order_test();
+
+  local_res = std::count(local_input_.begin(), local_input_.end(), target_char_);
+  boost::mpi::reduce(world, local_res, res, std::plus<>(), 0);
+
+  return true;
+}
+
+bool vasenkov_a_char_freq_mpi::CharFrequencyParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool vasenkov_a_char_freq_mpi::CharFrequencyParallel::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  }
+
+  return true;
+}
diff --git a/tasks/seq/vasenkov_a_char_freq/func_tests/main.cpp b/tasks/seq/vasenkov_a_char_freq/func_tests/main.cpp
new file mode 100644
index 00000000000..6fcfe02a01e
--- /dev/null
+++ b/tasks/seq/vasenkov_a_char_freq/func_tests/main.cpp
@@ -0,0 +1,282 @@
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "seq/vasenkov_a_char_freq/include/ops_seq.hpp"
+
+TEST(vasenkov_a_char_frequency_seq, test_char_frequency_a_in_abc) {
+  std::string input_str = "abcabc";
+  char target_char = 'a';
+  int expected_frequency = 2;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<char> in_char(1, target_char);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_str.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_char.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->inputs_count.emplace_back(in_char.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq);
+  ASSERT_EQ(charFrequencyTask.validation(), true);
+  charFrequencyTask.pre_processing();
+  charFrequencyTask.run();
+  charFrequencyTask.post_processing();
+  ASSERT_EQ(expected_frequency, out[0]);
+}
+
+TEST(vasenkov_a_char_frequency_seq, test_char_frequency_b_in_abc) {
+  std::string input_str = "abcabc";
+  char target_char = 'b';
+  int expected_frequency = 2;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<char> in_char(1, target_char);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_str.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_char.data()));
taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_c_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'c'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_x_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'x'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_a_in_long_string) { + std::string input_str(1000000, 'a'); + char target_char = 'a'; + int expected_frequency = 1000000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, 
test_char_frequency_in_empty_string) { + std::string input_str; + char target_char = 'a'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_a_in_single_char_a) { + std::string input_str = "a"; + char target_char = 'a'; + int expected_frequency = 1; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_b_in_ababa) { + std::string input_str = "ababa"; + char target_char = 'b'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_c_in_150_chars) { + std::string input_str(150, 'c'); + char target_char = 'c'; + int expected_frequency = 150; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_x_in_mixed_string) { + std::string input_str = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"; + char target_char = 'x'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_char_frequency_none_in_long_string) { + std::string input_str(150, 'a'); + char target_char = 'b'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} diff --git a/tasks/seq/vasenkov_a_char_freq/include/ops_seq.hpp b/tasks/seq/vasenkov_a_char_freq/include/ops_seq.hpp new file mode 100644 index 00000000000..0171df4e419 --- /dev/null +++ b/tasks/seq/vasenkov_a_char_freq/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace vasenkov_a_char_frequency_seq { + +class CharFrequencyTaskSequential : public ppc::core::Task { + public: + explicit CharFrequencyTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + char target_char_; + int frequency_ = 0; + std::string str_input_; +}; + +} // namespace vasenkov_a_char_frequency_seq diff --git a/tasks/seq/vasenkov_a_char_freq/perf_tests/main.cpp b/tasks/seq/vasenkov_a_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..b10c9f4eb9a --- /dev/null +++ b/tasks/seq/vasenkov_a_char_freq/perf_tests/main.cpp @@ -0,0 +1,80 @@ +#include + +#include +#include + 
+#include "core/perf/include/perf.hpp" +#include "seq/vasenkov_a_char_freq/include/ops_seq.hpp" + +TEST(vasenkov_a_char_frequency_seq, test_pipeline_run) { + std::string input_str(150000000, 'a'); + char target_char = 'a'; + int expected_frequency = 150000000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(vasenkov_a_char_frequency_seq, test_task_run) { + std::string input_str(150000000, 'a'); + char target_char = 'a'; + int expected_frequency = 150000000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} diff --git a/tasks/seq/vasenkov_a_char_freq/src/ops_seq.cpp b/tasks/seq/vasenkov_a_char_freq/src/ops_seq.cpp new file mode 100644 index 00000000000..2848bd7c8e3 --- /dev/null +++ b/tasks/seq/vasenkov_a_char_freq/src/ops_seq.cpp @@ -0,0 +1,32 @@ +#include "seq/vasenkov_a_char_freq/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential::pre_processing() { + internal_order_test(); + str_input_ = *reinterpret_cast(taskData->inputs[0]); + target_char_ = *reinterpret_cast(taskData->inputs[1]); + frequency_ = 0; + return true; +} + +bool vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential::validation() { + 
internal_order_test();
+  return (taskData->inputs_count[0] == 1) && (taskData->inputs_count[1] == 1) && (taskData->outputs_count[0] == 1);
+}
+
+bool vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential::run() {
+  internal_order_test();
+  frequency_ = std::count(str_input_.begin(), str_input_.end(), target_char_);
+  return true;
+}
+
+bool vasenkov_a_char_frequency_seq::CharFrequencyTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int *>(taskData->outputs[0]) = frequency_;
+  return true;
+}
\ No newline at end of file

From 936da00b9adf1b3679e2098c6ffacc75ef6c9eac Mon Sep 17 00:00:00 2001
From: MikeMuradov <133360712+MikeMuradov@users.noreply.github.com>
Date: Sat, 2 Nov 2024 16:27:00 +0300
Subject: [PATCH 055/155] Muradov Mike. Task 1. Variant 22. Counting the number of alphabetic characters in a string (#84)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## SEQ version
1. The input string is loaded and the `alpha_count_` counter is reset to zero
2. The alphabetic characters are counted with `std::count_if` and `std::isalpha`
3. The value of `alpha_count_` is written to `taskData->outputs`

## MPI version
1. The process with `rank = 0` loads the input string; the length `n` is broadcast to all processes
2. Process 0 splits the string into chunks and sends one chunk to each process via `scatterv` (see the sketch after this list)
3. Each process counts the alphabetic characters in its chunk of the string
4. `reduce` sums the local results into `total_alpha_count_`
5. Process 0 writes `total_alpha_count_` to `taskData->outputs`
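As a worked illustration of steps 1-4, here is a minimal, self-contained sketch of the same splitting scheme. It is not part of this patch: Boost.MPI is assumed, and the names (`counts`, `displs`, `local`) are hypothetical.

```cpp
#include <boost/mpi.hpp>

#include <algorithm>
#include <cctype>
#include <string>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::string text;
  unsigned int n = 0;
  if (world.rank() == 0) {
    text = "Hello, World! 123";  // sample input, known only on rank 0
    n = text.size();
  }
  boost::mpi::broadcast(world, n, 0);  // every rank needs the length

  // Chunk layout: base size n / size, the first n % size ranks take one extra char.
  const int size = world.size();
  std::vector<int> counts(size, static_cast<int>(n) / size);
  std::vector<int> displs(size, 0);
  for (int i = 0; i < size; ++i) {
    if (i < static_cast<int>(n) % size) ++counts[i];
    if (i > 0) displs[i] = displs[i - 1] + counts[i - 1];
  }

  std::vector<char> local(counts[world.rank()]);
  boost::mpi::scatterv(world, text.data(), counts, displs, local.data(), counts[world.rank()], 0);

  // Local count, then a plus-reduction onto rank 0.
  int local_count = std::count_if(local.begin(), local.end(),
                                  [](char c) { return std::isalpha(static_cast<unsigned char>(c)) != 0; });
  int total = 0;
  boost::mpi::reduce(world, local_count, total, std::plus<>(), 0);
  return 0;  // on rank 0, total == 10 for the sample string
}
```

With this layout no rank receives more than one character beyond any other, so the counting work stays balanced.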
---
 .../func_tests/main.cpp                       | 148 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  48 ++++++
 .../perf_tests/main.cpp                       | 104 ++++++++++++
 .../src/ops_mpi.cpp                           | 110 +++++++++++++
 .../func_tests/main.cpp                       | 116 ++++++++++++++
 .../include/ops_seq.hpp                       |  24 +++
 .../perf_tests/main.cpp                       |  93 +++++++++++
 .../src/ops_seq.cpp                           |  33 ++++
 8 files changed, 676 insertions(+)
 create mode 100644 tasks/mpi/muradov_m_count_alpha_chars/func_tests/main.cpp
 create mode 100644 tasks/mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/muradov_m_count_alpha_chars/perf_tests/main.cpp
 create mode 100644 tasks/mpi/muradov_m_count_alpha_chars/src/ops_mpi.cpp
 create mode 100644 tasks/seq/muradov_m_count_alpha_chars/func_tests/main.cpp
 create mode 100644 tasks/seq/muradov_m_count_alpha_chars/include/ops_seq.hpp
 create mode 100644 tasks/seq/muradov_m_count_alpha_chars/perf_tests/main.cpp
 create mode 100644 tasks/seq/muradov_m_count_alpha_chars/src/ops_seq.cpp

diff --git a/tasks/mpi/muradov_m_count_alpha_chars/func_tests/main.cpp b/tasks/mpi/muradov_m_count_alpha_chars/func_tests/main.cpp
new file mode 100644
index 00000000000..39400064c0c
--- /dev/null
+++ b/tasks/mpi/muradov_m_count_alpha_chars/func_tests/main.cpp
@@ -0,0 +1,148 @@
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <string>
+#include <vector>
+
+#include "mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp"
+
+TEST(muradov_m_count_alpha_chars_mpi, test_all_alpha_characters) {
+  boost::mpi::communicator world;
+  std::string global_str;
+  std::vector<int> global_count(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_str = 240;
+    global_str = std::string(count_size_str, 'a');
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataPar->inputs_count.emplace_back(global_str.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_count.data()));
+    taskDataPar->outputs_count.emplace_back(global_count.size());
+  }
+
+  muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> reference_count(1, 0);
+
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataSeq->inputs_count.emplace_back(global_str.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_count.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_count.size());
+
+    muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_count[0], global_count[0]);
+  }
+}
+
+TEST(muradov_m_count_alpha_chars_mpi, test_mixed_characters) {
+  boost::mpi::communicator world;
+  std::string global_str;
+  std::vector<int> global_count(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_str = 240;
+    global_str = std::string(count_size_str, '1');
+    for (int i = 0; i < count_size_str; i += 2) {
+      global_str[i] = 'b';
+    }
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +void run_test_for_string(const std::string& test_str) { + boost::mpi::communicator world; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int expected_alpha_count = 0; + + if (world.rank() == 0) { + expected_alpha_count = std::count_if(test_str.begin(), test_str.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(test_str.data()))); + taskDataPar->inputs_count.emplace_back(test_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(test_str.data()))); + taskDataSeq->inputs_count.emplace_back(test_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + ASSERT_EQ(expected_alpha_count, global_count[0]); + } +} + +TEST(muradov_m_count_alpha_chars_mpi, test_various_lengths) { + run_test_for_string(""); + run_test_for_string("A"); + run_test_for_string("Ab"); + run_test_for_string("A1b"); + run_test_for_string("A1bC2"); + run_test_for_string("A1bC2d3"); + run_test_for_string("AbCdeFg"); + run_test_for_string("A1b2C3d4e"); +} \ No newline at end of file diff --git a/tasks/mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp b/tasks/mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp new file mode 100644 index 00000000000..da24d5bdb1b --- /dev/null +++ b/tasks/mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +#pragma once + 
+#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace muradov_m_count_alpha_chars_mpi { + +class AlphaCharCountTaskSequential : public ppc::core::Task { + public: + explicit AlphaCharCountTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + int alpha_count_ = 0; +}; + +class AlphaCharCountTaskParallel : public ppc::core::Task { + public: + explicit AlphaCharCountTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + std::vector local_input_; + int local_alpha_count_ = 0; + int total_alpha_count_ = 0; + + boost::mpi::communicator world; +}; + +} // namespace muradov_m_count_alpha_chars_mpi diff --git a/tasks/mpi/muradov_m_count_alpha_chars/perf_tests/main.cpp b/tasks/mpi/muradov_m_count_alpha_chars/perf_tests/main.cpp new file mode 100644 index 00000000000..44f848d212a --- /dev/null +++ b/tasks/mpi/muradov_m_count_alpha_chars/perf_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp" + +std::string generate_string(size_t length) { + std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()"; + std::string result; + result.reserve(length); + + std::default_random_engine generator; + std::uniform_int_distribution distribution(0, characters.size() - 1); + + for (size_t i = 0; i < length; ++i) { + result += characters[distribution(generator)]; + } + + return result; +} + +TEST(muradov_m_count_alpha_chars_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int expected_alpha_count; + if (world.rank() == 0) { + global_str = generate_string(9999999); + + expected_alpha_count = std::count_if(global_str.begin(), global_str.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_alpha_count, global_count[0]); + } +} + +TEST(muradov_m_count_alpha_chars_mpi, test_task_run) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int 
expected_alpha_count; + if (world.rank() == 0) { + global_str = generate_string(9999999); + + expected_alpha_count = std::count_if(global_str.begin(), global_str.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_alpha_count, global_count[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/muradov_m_count_alpha_chars/src/ops_mpi.cpp b/tasks/mpi/muradov_m_count_alpha_chars/src/ops_mpi.cpp new file mode 100644 index 00000000000..3703e0e9f07 --- /dev/null +++ b/tasks/mpi/muradov_m_count_alpha_chars/src/ops_mpi.cpp @@ -0,0 +1,110 @@ +#include "mpi/muradov_m_count_alpha_chars/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential::pre_processing() { + internal_order_test(); + + input_str_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_str_.begin()); + + alpha_count_ = 0; + return true; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential::run() { + internal_order_test(); + + alpha_count_ = std::count_if(input_str_.begin(), input_str_.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + return true; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskSequential::post_processing() { + reinterpret_cast(taskData->outputs[0])[0] = alpha_count_; + return true; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel::pre_processing() { + internal_order_test(); + + local_alpha_count_ = 0; + total_alpha_count_ = 0; + + return true; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel::run() { + internal_order_test(); + + int myid = world.rank(); + int world_size = world.size(); + unsigned int n = 0; + + if (myid == 0) { + n = taskData->inputs_count[0]; + input_str_ = std::vector(n); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + memcpy(input_str_.data(), tmp_ptr, sizeof(char) * n); + } + + boost::mpi::broadcast(world, n, 0); + + unsigned int vec_send_size = n / world_size; + unsigned int overflow_size = n % world_size; + std::vector send_counts(world_size, 
vec_send_size);
+  std::vector<int> displs(world_size, 0);
+
+  for (unsigned int i = 0; i < static_cast<unsigned int>(world_size); ++i) {
+    if (i < static_cast<unsigned int>(overflow_size)) {
+      ++send_counts[i];
+    }
+    if (i > 0) {
+      displs[i] = displs[i - 1] + send_counts[i - 1];
+    }
+  }
+
+  unsigned int loc_vec_size = send_counts[myid];
+  local_input_.resize(loc_vec_size);
+
+  boost::mpi::scatterv(world, input_str_.data(), send_counts, displs, local_input_.data(), loc_vec_size, 0);
+
+  local_alpha_count_ = std::count_if(local_input_.begin(), local_input_.end(),
+                                     [](char c) { return std::isalpha(static_cast<unsigned char>(c)); });
+
+  boost::mpi::reduce(world, local_alpha_count_, total_alpha_count_, std::plus<>(), 0);
+
+  return true;
+}
+
+bool muradov_m_count_alpha_chars_mpi::AlphaCharCountTaskParallel::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    reinterpret_cast<int *>(taskData->outputs[0])[0] = total_alpha_count_;
+  }
+
+  return true;
+}
diff --git a/tasks/seq/muradov_m_count_alpha_chars/func_tests/main.cpp b/tasks/seq/muradov_m_count_alpha_chars/func_tests/main.cpp
new file mode 100644
index 00000000000..3aa9d0daccf
--- /dev/null
+++ b/tasks/seq/muradov_m_count_alpha_chars/func_tests/main.cpp
@@ -0,0 +1,116 @@
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "seq/muradov_m_count_alpha_chars/include/ops_seq.hpp"
+
+int count_alpha_chars(const std::string& str) {
+  return std::count_if(str.begin(), str.end(), [](char c) { return std::isalpha(static_cast<unsigned char>(c)); });
+}
+
+TEST(muradov_m_count_alpha_chars_seq, test_empty_string) {
+  std::string input_str;
+  int expected_alpha_count = 0;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_str.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential task(taskDataSeq);
+  ASSERT_EQ(task.validation(), true);
+  task.pre_processing();
+  task.run();
+  task.post_processing();
+  ASSERT_EQ(expected_alpha_count, out[0]);
+}
+
+TEST(muradov_m_count_alpha_chars_seq, test_only_non_alpha_characters) {
+  std::string input_str = "1234567890!@#$%^&*()";
+  int expected_alpha_count = 0;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_str.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential task(taskDataSeq);
+  ASSERT_EQ(task.validation(), true);
+  task.pre_processing();
+  task.run();
+  task.post_processing();
+  ASSERT_EQ(expected_alpha_count, out[0]);
+}
+
+TEST(muradov_m_count_alpha_chars_seq, test_mixed_alpha_and_non_alpha) {
+  std::string input_str = "Hello, World! 123";
+  int expected_alpha_count = count_alpha_chars(input_str);
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_str.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential task(taskDataSeq);
+  ASSERT_EQ(task.validation(), true);
+  task.pre_processing();
+  task.run();
+  task.post_processing();
+  ASSERT_EQ(expected_alpha_count, out[0]);
+}
+
+TEST(muradov_m_count_alpha_chars_seq, test_all_alpha_characters) {
+  std::string input_str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+  int expected_alpha_count = count_alpha_chars(input_str);
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_str.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential task(taskDataSeq);
+  ASSERT_EQ(task.validation(), true);
+  task.pre_processing();
+  task.run();
+  task.post_processing();
+  ASSERT_EQ(expected_alpha_count, out[0]);
+}
+
+TEST(muradov_m_count_alpha_chars_seq, test_large_input_string) {
+  std::string input_str = std::string(100000, 'a') + std::string(100000, '1');
+  int expected_alpha_count = count_alpha_chars(input_str);
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_str.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential task(taskDataSeq);
+  ASSERT_EQ(task.validation(), true);
+  task.pre_processing();
+  task.run();
+  task.post_processing();
+  ASSERT_EQ(expected_alpha_count, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/muradov_m_count_alpha_chars/include/ops_seq.hpp b/tasks/seq/muradov_m_count_alpha_chars/include/ops_seq.hpp
new file mode 100644
index 00000000000..d0a42a9a238
--- /dev/null
+++ b/tasks/seq/muradov_m_count_alpha_chars/include/ops_seq.hpp
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <memory>  // for std::shared_ptr
+#include <string>
+
+#include "core/task/include/task.hpp"
+
+namespace muradov_m_count_alpha_chars_seq {
+
+class AlphaCharCountTaskSequential : public ppc::core::Task {
+ public:
+  explicit AlphaCharCountTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::string input_str_;
+  int alpha_count_ = 0;
+};
+
+}  // namespace muradov_m_count_alpha_chars_seq
diff --git a/tasks/seq/muradov_m_count_alpha_chars/perf_tests/main.cpp b/tasks/seq/muradov_m_count_alpha_chars/perf_tests/main.cpp
new file mode 100644
index 00000000000..4964d6df50b
--- /dev/null
+++ b/tasks/seq/muradov_m_count_alpha_chars/perf_tests/main.cpp
@@ -0,0 +1,80 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <random>
+#include <string>
+
+#include "core/perf/include/perf.hpp"
+#include 
"seq/muradov_m_count_alpha_chars/include/ops_seq.hpp" + +std::string generate_large_string(size_t length) { + std::string characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()"; + std::string result; + result.reserve(length); + + std::default_random_engine generator; + std::uniform_int_distribution distribution(0, characters.size() - 1); + + for (size_t i = 0; i < length; ++i) { + result += characters[distribution(generator)]; + } + + return result; +} + +TEST(muradov_m_count_alpha_chars_seq, test_pipeline_run) { + std::string input_str = generate_large_string(1000000); + int expected_alpha_count = std::count_if(input_str.begin(), input_str.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto alphaCharCountTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(alphaCharCountTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_alpha_count, out[0]); +} + +TEST(muradov_m_count_alpha_chars_seq, test_task_run) { + std::string input_str = generate_large_string(1000000); + int expected_alpha_count = std::count_if(input_str.begin(), input_str.end(), + [](char c) { return std::isalpha(static_cast(c)); }); + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto alphaCharCountTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(alphaCharCountTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_alpha_count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/muradov_m_count_alpha_chars/src/ops_seq.cpp b/tasks/seq/muradov_m_count_alpha_chars/src/ops_seq.cpp new file mode 100644 index 00000000000..e2f9b56e2c6 --- /dev/null +++ b/tasks/seq/muradov_m_count_alpha_chars/src/ops_seq.cpp @@ -0,0 +1,33 @@ +#include "seq/muradov_m_count_alpha_chars/include/ops_seq.hpp" + +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool 
muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential::pre_processing() {
+  internal_order_test();
+  input_str_ = *reinterpret_cast<std::string *>(taskData->inputs[0]);
+  alpha_count_ = 0;
+  return true;
+}
+
+bool muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1;
+}
+
+bool muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential::run() {
+  internal_order_test();
+  alpha_count_ = std::count_if(input_str_.begin(), input_str_.end(),
+                               [](char c) { return std::isalpha(static_cast<unsigned char>(c)); });
+  return true;
+}
+
+bool muradov_m_count_alpha_chars_seq::AlphaCharCountTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int *>(taskData->outputs[0]) = alpha_count_;
+  return true;
+}

From a3fda0a58022d6481a139242eb82c896e4b69556 Mon Sep 17 00:00:00 2001
From: Gn4ik <112872726+Gn4ik@users.noreply.github.com>
Date: Sun, 3 Nov 2024 04:48:07 +0300
Subject: [PATCH 056/155] Gnitienko Kirill. Task 1. Variant 11. Sum of values over matrix rows (#58)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Description of the sequential task
- **pre_processing**: Prepares the data by extracting the matrix dimensions and copying the input into the input_ vector.
- **validation**: Checks that the input and output data are well-formed, including dimensions and element counts.
- **run**: The core row-summing logic: the matrix is stored as a flat one-dimensional vector. The outer loop walks over every matrix row (from 0 to rows); the inner loop sums all elements of the current row (from 0 to cols). Each row sum is stored in the result vector res.
- **post_processing**: Writes the results to the output data.

### Description of the MPI task
- **pre_processing**: Initializes the matrix dimensions and distributes the data across processes. The root process loads the data and passes it to the other processes via broadcast.
- **validation**: Checks the input and output data the same way as the sequential task, but only on the root process.
- **run**: Each process performs the row summation on its share: it computes how many rows it must handle and calls mainFunc with the row range to process. Inside mainFunc the summation works as in the sequential version, except each process only touches its own part of the matrix. The results are collected on the root process with gatherv (see the sketch after this list).
- **post_processing**: The root process writes the gathered results to the output data.
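For illustration, a minimal self-contained sketch of this row partitioning and gatherv collection. It is not part of the patch: Boost.MPI is assumed, the sample matrix is taken from the tests below, and all names (`counts`, `local_sums`, `first_row`) are hypothetical.

```cpp
#include <boost/mpi.hpp>

#include <algorithm>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int rows = 4;
  const int cols = 3;
  std::vector<int> matrix(rows * cols);  // flattened row-major matrix
  if (world.rank() == 0) {
    matrix = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4};
  }
  boost::mpi::broadcast(world, matrix.data(), rows * cols, 0);

  // Split rows as evenly as possible: the first rows % size ranks get one extra.
  const int size = world.size();
  const int base = rows / size;
  const int extra = rows % size;
  const int my_rows = base + (world.rank() < extra ? 1 : 0);
  const int first_row = world.rank() * base + std::min(world.rank(), extra);

  // Sum each of this rank's rows of the flat matrix.
  std::vector<int> local_sums(my_rows, 0);
  for (int r = 0; r < my_rows; ++r)
    for (int c = 0; c < cols; ++c) local_sums[r] += matrix[(first_row + r) * cols + c];

  // Gather the variable-sized chunks of row sums back on rank 0, in row order.
  std::vector<int> counts(size, base);
  for (int i = 0; i < extra; ++i) ++counts[i];
  std::vector<int> res(rows, 0);
  boost::mpi::gatherv(world, local_sums, res.data(), counts, 0);

  return 0;  // on rank 0, res == {3, 6, 9, 12} for the sample matrix
}
```

Because the chunks are gathered in rank order and each rank owns a contiguous block of rows, res ends up in the original row order without explicit displacements.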
--- .../func_tests/main.cpp | 457 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 ++ .../perf_tests/main.cpp | 94 ++++ .../src/ops_mpi.cpp | 134 +++++ .../func_tests/main.cpp | 187 +++++++ .../include/ops_seq.hpp | 25 + .../perf_tests/main.cpp | 103 ++++ .../src/ops_seq.cpp | 55 +++ 8 files changed, 1104 insertions(+) create mode 100644 tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp create mode 100644 tasks/seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/gnitienko_k_sum_values_by_rows_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..52531436009 --- /dev/null +++ b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,457 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp" + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_empty_matrix) { + boost::mpi::communicator world; + + int rows = 0; + int cols = 0; + + std::vector global_vec; + std::vector resMPI; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_of_a_given_matrix) { + boost::mpi::communicator world; + + int cols = 3; + int rows = 4; + + std::vector global_vec = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4}; + std::vector resMPI(rows, 0); + std::vector expected_sums = {3, 6, 9, 12}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + 
taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_large_matrix) { + boost::mpi::communicator world; + + int cols = 1000; + int rows = 2000; + + std::vector global_vec(rows * cols, 0); + std::vector resMPI(rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_negative_values) { + boost::mpi::communicator world; + + int cols = 100; + int rows = 100; + + std::vector global_vec(rows * cols, -1); + std::vector resMPI(rows, 0); + std::vector expect(rows, -100); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_output_element) { + boost::mpi::communicator world; + + int cols = 3; + int rows = 4; + + std::vector global_vec = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4}; + std::vector resMPI(rows, 0); + int expected_out_3 = 12; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI[3], expected_out_3); + ASSERT_EQ(resSeq[3], expected_out_3); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_random_matrix) { + boost::mpi::communicator world; + + int cols = 3; + int rows = 4; + + std::vector global_vec = gnitienko_k_sum_row_mpi::getRandomVector(rows * cols); + std::vector resMPI(rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + 
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_of_empty_matrix_2) { + boost::mpi::communicator world; + + int cols = 0; + int rows = 4; + + std::vector global_vec; + std::vector resMPI(rows, 0); + std::vector expected_sums = {0, 0, 0, 0}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, expected_sums); + ASSERT_EQ(resSeq, expected_sums); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_small_matrix) { + boost::mpi::communicator world; + + int cols = 1; + int rows = 1; + + std::vector global_vec = {15}; + std::vector resMPI(rows, 0); + std::vector expect = {15}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, expect); + ASSERT_EQ(resSeq, expect); + } +} + +TEST(gnitienko_k_sum_values_by_rows_MPI, test_two_columns_one_row) { + boost::mpi::communicator world; + + int cols = 2; + int rows = 1; + + std::vector global_vec = {12, 10}; + std::vector resMPI(rows, 0); + std::vector expect = {22}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(rows); + } + + gnitienko_k_sum_row_mpi::SumByRowMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector resSeq(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(rows); + + // Create Task + gnitienko_k_sum_row_mpi::SumByRowMPISeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, expect); + ASSERT_EQ(resSeq, expect); + } +} \ No newline at end of file diff --git a/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..43a681f317b --- /dev/null +++ b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gnitienko_k_sum_row_mpi { + +std::vector getRandomVector(int sz); + +class SumByRowMPISeq : public ppc::core::Task { + public: + explicit SumByRowMPISeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector mainFunc(); + std::vector input_{}, res{}; + int rows{}, cols{}; +}; + +class SumByRowMPIParallel : public ppc::core::Task { + public: + explicit SumByRowMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector mainFunc(int StartRow, int LastRow); + std::vector input_{}, res{}; + int rows{}, cols{}; + boost::mpi::communicator world; +}; + +} // namespace gnitienko_k_sum_row_mpi \ No newline at 
end of file diff --git a/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..a2eb24da191 --- /dev/null +++ b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,94 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp" + +TEST(gnitienko_k_sum_by_row_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sums; + std::vector expect; + + std::shared_ptr taskDataPar = std::make_shared(); + int rows; + int cols; + if (world.rank() == 0) { + rows = 10000; + cols = 10000; + expect.resize(rows, 10000); + global_matrix.resize(rows * cols, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(static_cast(rows)); + taskDataPar->inputs_count.emplace_back(static_cast(cols)); + global_sums.resize(rows, 0); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sums.data())); + taskDataPar->outputs_count.emplace_back(static_cast(global_sums.size())); + } + + auto sumByRowTask = std::make_shared(taskDataPar); + ASSERT_EQ(sumByRowTask->validation(), true); + sumByRowTask->pre_processing(); + sumByRowTask->run(); + sumByRowTask->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sumByRowTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(global_sums, expect); + } +} + +TEST(gnitienko_k_sum_by_row_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sums; + std::vector expect; + + std::shared_ptr taskDataPar = std::make_shared(); + int rows; + int cols; + if (world.rank() == 0) { + rows = 10000; + cols = 10000; + expect.resize(rows, 10000); + global_matrix.resize(rows * cols, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(static_cast(rows)); + taskDataPar->inputs_count.emplace_back(static_cast(cols)); + global_sums.resize(rows, 0); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sums.data())); + taskDataPar->outputs_count.emplace_back(static_cast(global_sums.size())); + } + + auto sumByRowTask = std::make_shared(taskDataPar); + ASSERT_EQ(sumByRowTask->validation(), true); + sumByRowTask->pre_processing(); + sumByRowTask->run(); + sumByRowTask->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sumByRowTask); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(global_sums, expect); + } +} \ No newline at end of file diff --git a/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..60e1c11514d --- /dev/null +++ 
b/tasks/mpi/gnitienko_k_sum_values_by_rows_matrix/src/ops_mpi.cpp @@ -0,0 +1,136 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/gnitienko_k_sum_values_by_rows_matrix/include/ops_mpi.hpp" + +#include <algorithm> +#include <cstring> +#include <random> + +std::vector<int> gnitienko_k_sum_row_mpi::SumByRowMPISeq::mainFunc() { + for (int i = 0; i < rows; ++i) { + int sum = 0; + for (int j = 0; j < cols; ++j) { + sum += input_[i * cols + j]; + } + res[i] = sum; + } + return res; +} + +std::vector<int> gnitienko_k_sum_row_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector<int> vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPISeq::pre_processing() { + internal_order_test(); + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + input_.resize(rows * cols); + auto* ptr = reinterpret_cast<int*>(taskData->inputs[0]); + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + input_[i * cols + j] = ptr[i * cols + j]; + } + } + + res = std::vector<int>(rows, 0); + return true; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPISeq::validation() { + internal_order_test(); + // Two input dimensions must be given and the output size must match the row count + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count[0] == taskData->inputs_count[0]); +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPISeq::run() { + internal_order_test(); + mainFunc(); + return true; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPISeq::post_processing() { + internal_order_test(); + memcpy(taskData->outputs[0], res.data(), rows * sizeof(int)); + return true; +} + +// Parallel + +bool gnitienko_k_sum_row_mpi::SumByRowMPIParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + input_.resize(rows * cols); + auto* ptr = reinterpret_cast<int*>(taskData->inputs[0]); + + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + input_[i * cols + j] = ptr[i * cols + j]; + } + } + } + return true; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPIParallel::validation() { + internal_order_test(); + if (world.rank() == 0) + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count[0] == taskData->inputs_count[0]); + return true; +} + +std::vector<int> gnitienko_k_sum_row_mpi::SumByRowMPIParallel::mainFunc(int startRow, int lastRow) { + std::vector<int> result; + for (int i = startRow; i < lastRow; i++) { + int sum_by_row = 0; + for (int j = 0; j < cols; j++) { + sum_by_row += input_[i * cols + j]; + } + result.push_back(sum_by_row); + } + return result; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPIParallel::run() { + internal_order_test(); + // Rank 0 shares the dimensions and the flattened matrix with every process. + boost::mpi::broadcast(world, rows, 0); + boost::mpi::broadcast(world, cols, 0); + input_.resize(rows * cols); + boost::mpi::broadcast(world, input_.data(), rows * cols, 0); + // Rows are split into uniform blocks of ceil(rows / size); the last ranks may run past the end, so the end index is clamped to rows and short results are zero-padded to the common block size. + int rows_per_process = rows / world.size(); + int extra_rows = rows % world.size(); + if (extra_rows != 0) rows_per_process += 1; + int process_last_row = std::min(rows, rows_per_process * (world.rank() + 1)); + std::vector<int> local_sum = mainFunc(rows_per_process * world.rank(), process_last_row); + local_sum.resize(rows_per_process); + if (world.rank() == 0) { + // Gather one equal-sized block per rank, then trim the padding back to rows entries. + std::vector<int> local_res(rows + rows_per_process * world.size()); + std::vector<int> sizes(world.size(), 
rows_per_process); + boost::mpi::gatherv(world, local_sum.data(), local_sum.size(), local_res.data(), sizes, 0); + local_res.resize(rows); + res = local_res; + } else { + boost::mpi::gatherv(world, local_sum.data(), local_sum.size(), 0); + } + return true; +} + +bool gnitienko_k_sum_row_mpi::SumByRowMPIParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + std::copy(res.begin(), res.end(), reinterpret_cast<int*>(taskData->outputs[0])); + } + return true; +} diff --git a/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..33b4c66aab9 --- /dev/null +++ b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,188 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <vector> + +#include "seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(gnitienko_k_sum_row_seq, Test_rows_eq_cols) { + const int rows = 10; + const int cols = 10; + + // Create data + std::vector<int> in(rows * cols, 0); + for (int i = 0; i < rows; ++i) { + in[i * cols] = i; + } + std::vector<int> expect(rows, 0); + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + expect[i] += in[i * cols + j]; + } + } + std::vector<int> out(rows, 0); + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data())); + taskDataSeq->inputs_count.emplace_back(static_cast<uint32_t>(rows)); + taskDataSeq->inputs_count.emplace_back(static_cast<uint32_t>(cols)); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast<uint32_t>(out.size())); + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expect, out); +} + +TEST(gnitienko_k_sum_row_seq, Test_zero_values) { + const int rows = 3; + const int cols = 3; + + // Create data + std::vector<int> in(rows * cols, 0); + std::vector<int> expect(rows, 0); + std::vector<int> out(rows, 0); + + // Create TaskData + auto taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data())); + taskDataSeq->inputs_count = {static_cast<uint32_t>(rows), static_cast<uint32_t>(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskDataSeq->outputs_count = {static_cast<uint32_t>(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expect, out); +} + +TEST(gnitienko_k_sum_row_seq, Test_arbitrary_values) { + const int rows = 2; + const int cols = 3; + + // Create data + std::vector<int> in = {1, 2, 3, 4, 5, 6}; + std::vector<int> expect = {6, 15}; + std::vector<int> out(rows, 0); + + // Create TaskData + auto taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data())); + taskDataSeq->inputs_count = {static_cast<uint32_t>(rows), static_cast<uint32_t>(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskDataSeq->outputs_count = {static_cast<uint32_t>(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + 
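// Worked check: with this row-major 2x3 input, row 0 sums to 1 + 2 + 3 = 6 and row 1 to 4 + 5 + 6 = 15, which is exactly expect. + 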
testTaskSequential.post_processing(); + ASSERT_EQ(expect, out); +} + +TEST(gnitienko_k_sum_row_seq, Test_negative_values) { + const int rows = 2; + const int cols = 2; + + // Create data + std::vector in = {-1, -2, -3, -4}; + std::vector expect = {-3, -7}; + std::vector out(rows, 0); + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {static_cast(rows), static_cast(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {static_cast(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expect, out); +} + +TEST(gnitienko_k_sum_row_seq, Test_output_size) { + const int rows = 5; + const int cols = 3; + + // Create data + std::vector in(rows * cols, 1); + std::vector out(rows, 0); + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {static_cast(rows), static_cast(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {static_cast(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out.size(), static_cast(rows)); +} + +TEST(gnitienko_k_sum_row_seq, Test_output_element) { + const int rows = 4; + const int cols = 4; + + // Create data + std::vector in = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + std::vector out(rows, 0); + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {static_cast(rows), static_cast(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {static_cast(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[1], 26); +} + +TEST(gnitienko_k_sum_row_seq, Test_empty_input) { + const int rows = 0; + const int cols = 0; + + // Create data + std::vector in; + std::vector out; + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {static_cast(rows), static_cast(cols)}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {static_cast(out.size())}; + + // Create Task + gnitienko_k_sum_row_seq::SumByRowSeq testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_TRUE(out.empty()); +} diff --git a/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..ae26bb3f1aa --- /dev/null +++ b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp @@ -0,0 
+1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace gnitienko_k_sum_row_seq { + +class SumByRowSeq : public ppc::core::Task { + public: + explicit SumByRowSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector mainFunc(); + std::vector input_{}, res{}; + int rows{}, cols{}; +}; + +} // namespace gnitienko_k_sum_row_seq \ No newline at end of file diff --git a/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..70f1d71cdf1 --- /dev/null +++ b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,103 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(gnitienko_k_sum_row_seq, test_pipeline_run) { + const int rows = 4000; + const int cols = 4000; + + // Create data + std::vector in(rows * cols, 0); + for (int i = 0; i < rows; ++i) { + in[i * cols] = i; + } + std::vector expect(rows, 0); + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + expect[i] += in[i * cols + j]; + } + } + std::vector out(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(static_cast(rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(cols)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(out.size())); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expect, out); +} + +TEST(gnitienko_k_sum_row_seq, test_task_run) { + const int rows = 4000; + const int cols = 4000; + + // Create data + std::vector in(rows * cols, 0); + for (int i = 0; i < rows; ++i) { + in[i * cols] = i; + } + std::vector expect(rows, 0); + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + expect[i] += in[i * cols + j]; + } + } + std::vector out(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(static_cast(rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(cols)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(out.size())); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + 
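// Perf setup: num_running repeats the task ten times, and current_timer below reports elapsed seconds from the chrono reference point t0. + 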
perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expect, out); +} diff --git a/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..4ca9a53f189 --- /dev/null +++ b/tasks/seq/gnitienko_k_sum_values_by_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,54 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/gnitienko_k_sum_values_by_rows_matrix/include/ops_seq.hpp" + +#include <cstring> + +bool gnitienko_k_sum_row_seq::SumByRowSeq::pre_processing() { + internal_order_test(); + + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + + input_.resize(rows * cols); + auto* ptr = reinterpret_cast<int*>(taskData->inputs[0]); + + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + input_[i * cols + j] = ptr[i * cols + j]; + } + } + + res = std::vector<int>(rows, 0); + + return true; +} + +bool gnitienko_k_sum_row_seq::SumByRowSeq::validation() { + internal_order_test(); + // Two input dimensions must be given and the output size must match the row count + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count[0] == taskData->inputs_count[0]); +} + +std::vector<int> gnitienko_k_sum_row_seq::SumByRowSeq::mainFunc() { + for (int i = 0; i < rows; ++i) { + int sum = 0; + for (int j = 0; j < cols; ++j) { + sum += input_[i * cols + j]; + } + res[i] = sum; + } + return res; +} + +bool gnitienko_k_sum_row_seq::SumByRowSeq::run() { + internal_order_test(); + mainFunc(); + return true; +} + +bool gnitienko_k_sum_row_seq::SumByRowSeq::post_processing() { + internal_order_test(); + memcpy(taskData->outputs[0], res.data(), rows * sizeof(int)); + return true; +} From 6e27a53439b475ae2e94d0889aa4806868d79dde Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sun, 3 Nov 2024 12:00:52 +0800 Subject: [PATCH 057/155] =?UTF-8?q?Revert=20"=D0=92=D0=BB=D0=B0=D0=B4?= =?UTF-8?q?=D0=B8=D0=BC=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=20=D0=AE=D0=BB=D0=B8?= =?UTF-8?q?=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2013.=20=D0=9C=D0=B0=D0=BA?= =?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7?= =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5?= =?UTF-8?q?=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82=D1=80?= =?UTF-8?q?=D0=B8=D1=86=D1=8B."=20(#154)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#70 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11642652525/job/32422497972 --- .../func_tests/main.cpp | 590 ------------------ .../include/ops_mpi.hpp | 48 -- .../perf_tests/main.cpp | 136 ---- .../src/ops_mpi.cpp | 139 ----- .../func_tests/main.cpp | 400 ------------ .../include/ops_seq.hpp | 25 - .../perf_tests/main.cpp | 119 ---- .../src/ops_seq.cpp | 51 -- 8 
files changed, 1508 deletions(-) delete mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp delete mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp delete mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp deleted file mode 100644 index a9fb6783b4f..00000000000 --- a/tasks/mpi/vladimirova_j_max_of_vector_elements/func_tests/main.cpp +++ /dev/null @@ -1,590 +0,0 @@ -#include - -#include -#include -#include -#include - -#include "mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" - -std::vector CreateVector(size_t size, size_t spread_of_val) { - // Init value for input and output - std::random_device dev; - std::mt19937 random(dev()); - std::vector v(size); - for (size_t i = 0; i < size; i++) { - v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; - } - return v; -} - -std::vector> CreateInputMatrix(size_t row_c, size_t col_c, size_t spread_of_val) { - std::vector> m(row_c); - for (size_t i = 0; i < row_c; i++) { - m[i] = CreateVector(col_c, spread_of_val); - } - return m; -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_ValMatrix_0) { - const size_t size = 0; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr.data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), false); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_CanCreate_10) { EXPECT_NO_THROW(CreateInputMatrix(10, 10, 10)); } - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_1) { - const size_t size = 1; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - 
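// Task lifecycle: validation, then pre_processing, run, and post_processing; rank 0 afterwards re-checks the result against a sequential reference task. - 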
ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_1_2) { - const size_t row = 1; - const size_t col = 2; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(row, col, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(row); - taskDataSeq->inputs_count.emplace_back(col); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_3_1) { - const size_t row = 1; - const size_t col = 2; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(row, col, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - 
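// Each matrix row is registered as a separate input pointer; the two inputs_count entries carry the row and column dimensions. - 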
taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(row); - taskDataSeq->inputs_count.emplace_back(col); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_10) { - const size_t size = 10; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_50) { - const size_t size = 50; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 
10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_100) { - const size_t size = 200; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - 
testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_10_50) { - const size_t row = 10; - const size_t col = 50; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(row, col, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(row); - taskDataSeq->inputs_count.emplace_back(col); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_100_50) { - const size_t row = 100; - const size_t col = 50; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(row, col, spread); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(row); - taskDataSeq->inputs_count.emplace_back(col); - - 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_SquareMatrix_50_WithSeveralMax) { - const size_t size = 50; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(size, size, spread); - global_matr[0][0] = spread; - global_matr[5][25] = spread; - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(size); - taskDataPar->inputs_count.emplace_back(size); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, Test_Matrix_100_50_WithSeveralMax) { - const size_t row = 100; - const size_t col = 50; - const size_t spread = 30; - - boost::mpi::communicator world; - std::vector> global_matr; - std::vector global_max(1, -((int)(spread + 10))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = CreateInputMatrix(row, col, spread); - global_matr[25][10] = spread; - global_matr[5][25] = spread; - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - 
testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matr[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - taskDataSeq->inputs_count.emplace_back(row); - taskDataSeq->inputs_count.emplace_back(col); - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp deleted file mode 100644 index 4db9e016444..00000000000 --- a/tasks/mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace vladimirova_j_max_of_vector_elements_mpi { - -int FindMaxElem(std::vector m); - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; - std::string ops; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res{}; - std::string ops; - boost::mpi::communicator world; -}; - -} // namespace vladimirova_j_max_of_vector_elements_mpi diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index c1688d3cca9..00000000000 --- a/tasks/mpi/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,136 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" - -std::vector CreateVector(size_t size, size_t spread_of_val) { - // Init value for input and output - std::random_device dev; - std::mt19937 random(dev()); - std::vector v(size); - for (size_t i = 0; i < size; i++) { - v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; - } - return v; -} - -std::vector> CreateInputMatrix(size_t row_c, size_t col_c, size_t spread_of_val) { - std::vector> m(row_c); - for (size_t i = 0; i < row_c; i++) { - m[i] = CreateVector(col_c, spread_of_val); - } - return m; -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, test_pipeline_run) { - int row = 7000; - int col = 7000; - int spread = 7000; - - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, 
-((int)spread + 10)); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 random(dev()); - - global_matrix = CreateInputMatrix(row, col, spread); - int some_row = random() % row; - int some_column = random() % col; - global_matrix[some_row][some_column] = spread; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(spread, global_max[0]); - } -} - -TEST(vladimirova_j_max_of_vector_elements_mpi, test_task_run) { - int row = 7000; - int col = 7000; - int spread = 7000; - - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)spread + 10)); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 random(dev()); - - global_matrix = CreateInputMatrix(row, col, spread); - int some_row = random() % row; - int some_column = random() % col; - global_matrix[some_row][some_column] = spread; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(row); - taskDataPar->inputs_count.emplace_back(col); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(spread, global_max[0]); - } -} diff --git a/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp deleted file mode 100644 index d4562910623..00000000000 --- a/tasks/mpi/vladimirova_j_max_of_vector_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,139 +0,0 @@ -#include 
"mpi/vladimirova_j_max_of_vector_elements/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -int vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(std::vector m) { - if (m.empty()) return INT_MIN; - int max_elem = m[0]; - for (int &i : m) { - if (i > max_elem) { - max_elem = i; - } - } - return max_elem; -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - - input_ = std::vector(taskData->inputs_count[0] * taskData->inputs_count[1]); - - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - auto *input_data = reinterpret_cast(taskData->inputs[i]); - for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { - input_[i * taskData->inputs_count[1] + j] = input_data[j]; - } - } - return true; -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return (taskData->inputs_count[0] > 0) && (taskData->inputs_count[1] > 0) && (taskData->outputs_count[0] == 1); -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::run() { - internal_order_test(); - - res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(input_); - return true; -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - if (world.rank() == 0) { - unsigned int rows = taskData->inputs_count[0]; - unsigned int columns = taskData->inputs_count[1]; - - input_ = std::vector(rows * columns); - - for (unsigned int i = 0; i < rows; i++) { - auto *input_data = reinterpret_cast(taskData->inputs[i]); - for (unsigned int j = 0; j < columns; j++) { - input_[i * columns + j] = input_data[j]; - } - } - } - - return true; -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - - return (world.rank() != 0) || - ((taskData->outputs_count[0] == 1) && (taskData->inputs_count[0] > 0) && (!taskData->inputs.empty())); -} - -bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - unsigned int delta = 0; - - if (world.rank() == 0) { - // Init vectors - - unsigned int rows = taskData->inputs_count[0]; - unsigned int columns = taskData->inputs_count[1]; - - delta = columns * rows / world.size(); - int div_r = columns * rows % world.size() + 1; - - if (delta == 0) { - for (int i = 1; i < world.size(); i++) { - world.send(i, 0, 0); - } - local_input_ = std::vector(input_.begin(), input_.begin() + div_r - 1); - res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(local_input_); - return true; - } - for (int i = 1; i < world.size(); i++) { - world.send(i, 0, delta + (int)(i < div_r)); - } - - for (int i = 1; i < div_r; i++) { - world.send(i, 0, input_.data() + delta * i + i - 1, delta + 1); - } - for (int i = div_r; i < world.size(); i++) { - world.send(i, 0, input_.data() + delta * i + div_r - 1, delta); - } - - local_input_ = std::vector(input_.begin(), input_.begin() + delta); - } - - if (world.rank() != 0) { - world.recv(0, 0, delta); - if (delta == 0) return true; - local_input_ = std::vector(delta); - world.recv(0, 0, local_input_.data(), delta); - } - - int local_res = vladimirova_j_max_of_vector_elements_mpi::FindMaxElem(local_input_); - 
reduce(world, local_res, res, boost::mpi::maximum<int>(), 0);
-
-  return true;
-}
-
-bool vladimirova_j_max_of_vector_elements_mpi::TestMPITaskParallel::post_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    reinterpret_cast<int *>(taskData->outputs[0])[0] = res;
-  }
-  return true;
-}
diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp
deleted file mode 100644
index 4b9615e38ff..00000000000
--- a/tasks/seq/vladimirova_j_max_of_vector_elements/func_tests/main.cpp
+++ /dev/null
@@ -1,400 +0,0 @@
-#include <gtest/gtest.h>
-
-#include <random>
-#include <vector>
-
-#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp"
-
-std::vector<int> CreateVector(size_t size, size_t spread_of_val) {
-  std::random_device dev;
-  std::mt19937 random(dev());
-  std::vector<int> v(size);
-  for (size_t i = 0; i < size; i++) {
-    v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val;
-  }
-  return v;
-}
-
-std::vector<std::vector<int>> CreateInputMatrix(size_t row_c, size_t column_c, size_t spread_of_val) {
-  // Init value for input and output
-  std::vector<std::vector<int>> m(row_c);
-  for (size_t i = 0; i < row_c; i++) {
-    m[i] = CreateVector(column_c, spread_of_val);
-  }
-  return m;
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_ValMatrix_0) {
-  const size_t size = 0;
-  const int spread = 10;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), false);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_CanCreate_10) {
-  const size_t col = 10;
-  const size_t row = 10;
-  const int spread = 10;
-  EXPECT_NO_THROW(CreateInputMatrix(row, col, spread));
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_1) {
-  const size_t size = 1;
-  const int spread = 10;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  in[0][0] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_1_2) {
-  const size_t col = 1;
-  const size_t row = 2;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(row, col, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % row;
-  in[some_row][0] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(row);
-  taskDataSeq->inputs_count.emplace_back(col);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_3_1) {
-  const size_t col = 3;
-  const size_t row = 1;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(row, col, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_col = random() % col;
-  in[0][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(row);
-  taskDataSeq->inputs_count.emplace_back(col);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_10) {
-  const size_t size = 10;
-  const int spread = 10;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % size;
-  int some_col = random() % size;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_20) {
-  const size_t size = 20;
-  const int spread = 50;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % size;
-  int some_col = random() % size;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_50) {
-  const size_t size = 50;
-  const int spread = 50;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % size;
-  int some_col = random() % size;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_100) {
-  const size_t size = 100;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % size;
-  int some_col = random() % size;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_SquareMatrix_100_WithSeveralMax) {
-  const size_t size = 100;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(size, size, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % size;
-  int some_col = random() % size;
-  in[some_row][some_col] = spread;
-  some_row = random() % size;
-  some_col = random() % size;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(size);
-  taskDataSeq->inputs_count.emplace_back(size);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_100_50_WithSeveralMax) {
-  const size_t col = 100;
-  const size_t row = 50;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -110);
-  std::vector<std::vector<int>> in = CreateInputMatrix(row, col, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % row;
-  int some_col = random() % col;
-  in[some_row][some_col] = spread;
-  some_row = random() % row;
-  some_col = random() % col;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(row);
-  taskDataSeq->inputs_count.emplace_back(col);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
-
-TEST(vladimirova_j_max_of_vector_elements_seq, Test_NotSquareMatrix_100_50) {
-  const size_t col = 100;
-  const size_t row = 50;
-  const int spread = 100;  // spread is the expected answer
-
-  // Create data
-  std::vector<int> out(1, -((int)spread + 10));
-  std::vector<std::vector<int>> in = CreateInputMatrix(row, col, spread);
-
-  std::random_device dev;
-  std::mt19937 random(dev());
-  int some_row = random() % row;
-  int some_col = random() % col;
-  in[some_row][some_col] = spread;
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  for (unsigned int i = 0; i < in.size(); i++)
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
-  taskDataSeq->inputs_count.emplace_back(row);
-  taskDataSeq->inputs_count.emplace_back(col);
-
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-
-  // Create Task
-  vladimirova_j_max_of_vector_elements_seq::TestTaskSequential TestTaskSequential(taskDataSeq);
-  ASSERT_EQ(TestTaskSequential.validation(), true);
-  TestTaskSequential.pre_processing();
-  TestTaskSequential.run();
-  TestTaskSequential.post_processing();
-
-  ASSERT_EQ(spread, out[0]);
-}
diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp
deleted file mode 100644
index 35f46742500..00000000000
---
a/tasks/seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace vladimirova_j_max_of_vector_elements_seq { - -int FindMaxElem(std::vector m); - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int res{}; - std::vector input_; -}; - -} // namespace vladimirova_j_max_of_vector_elements_seq diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index b4dce9a9f6e..00000000000 --- a/tasks/seq/vladimirova_j_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,119 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp" - -std::vector CreateVector(size_t size, size_t spread_of_val) { - std::random_device dev; - std::mt19937 random(dev()); - std::vector v(size); - for (size_t i = 0; i < size; i++) { - v[i] = (random() % (2 * spread_of_val + 1)) - spread_of_val; - } - return v; -} - -std::vector> CreateInputMatrix(size_t row_c, size_t column_c, size_t spread_of_val) { - // Init value for input and output - std::vector> m(row_c); - for (size_t i = 0; i < row_c; i++) { - m[i] = CreateVector(column_c, spread_of_val); - } - return m; -} - -TEST(vladimirova_j_max_of_vector_elements_seq, test_pipeline_run) { - std::random_device dev; - std::mt19937 random(dev()); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - int size = 7000; - int spread = 7000; - - std::vector> matrix_in; - matrix_in = CreateInputMatrix(size, size, spread); - std::vector out(1, matrix_in[0][0]); - - int some_row = random() % size; - int some_col = random() % size; - matrix_in[some_row][some_col] = spread; - - for (unsigned int i = 0; i < matrix_in.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix_in[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(spread, out[0]); -} - -TEST(sequential_vladimirova_j_max_of_vector_elements_seq, test_task_run) { - std::random_device dev; - std::mt19937 random(dev()); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - int size = 7000; - int spread = 7000; - std::vector> matrix_in; - matrix_in = CreateInputMatrix(size, size, spread); - 
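// A quick worked check of the generator's range used just above (editorial
// sketch; `spread` mirrors the test's local variable): with spread = 7000,
// random() % (2 * 7000 + 1) lies in [0, 14000], so subtracting 7000 leaves
// every generated value in [-7000, 7000]. Planting `spread` at one random
// cell therefore makes it the guaranteed maximum that the later
// ASSERT_EQ(spread, out[0]) checks for.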
std::vector out(1, matrix_in[0][0]); - - int some_row = random() % size; - int some_col = random() % size; - matrix_in[some_row][some_col] = spread; - - for (unsigned int i = 0; i < matrix_in.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix_in[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(spread, out[0]); -} diff --git a/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp deleted file mode 100644 index 8f46c9a5826..00000000000 --- a/tasks/seq/vladimirova_j_max_of_vector_elements/src/ops_seq.cpp +++ /dev/null @@ -1,51 +0,0 @@ -#include "seq/vladimirova_j_max_of_vector_elements/include/ops_seq.hpp" - -#include -#include - -using namespace std::chrono_literals; - -int vladimirova_j_max_of_vector_elements_seq::FindMaxElem(std::vector m) { - if (m.empty()) return INT_MIN; - int max_elem = m[0]; - for (int& i : m) { - if (i > max_elem) { - max_elem = i; - } - } - return max_elem; -} - -bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - - input_ = std::vector(taskData->inputs_count[0] * taskData->inputs_count[1]); - - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - auto* input_data = reinterpret_cast(taskData->inputs[i]); - for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { - input_[i * taskData->inputs_count[1] + j] = input_data[j]; - } - } - return true; -} - -bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::validation() { - internal_order_test(); - - return ((taskData->inputs_count[0] > 0) && (taskData->inputs_count[1] > 0)) && (taskData->outputs_count[0] == 1); -} - -bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::run() { - internal_order_test(); - - res = vladimirova_j_max_of_vector_elements_seq::FindMaxElem(input_); - return true; -} - -bool vladimirova_j_max_of_vector_elements_seq::TestTaskSequential::post_processing() { - internal_order_test(); - - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} From c5cac8d7d9a250d35e9c1d94c3deebd0f2105f18 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sun, 3 Nov 2024 12:09:24 +0800 Subject: [PATCH 058/155] =?UTF-8?q?Revert=20"=D0=9A=D1=83=D0=B4=D1=80?= =?UTF-8?q?=D1=8F=D1=88=D0=BE=D0=B2=D0=B0=20=D0=98=D1=80=D0=B8=D0=BD=D0=B0?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=209.=20=D0=A1=D0=BA=D0=B0=D0=BB?= =?UTF-8?q?=D1=8F=D1=80=D0=BD=D0=BE=D0=B5=20=D0=BF=D1=80=D0=BE=D0=B8=D0=B7?= 
=?UTF-8?q?=D0=B2=D0=B5=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=B2=D0=B5=D0=BA?= =?UTF-8?q?=D1=82=D0=BE=D1=80=D0=BE=D0=B2."=20(#155)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#69 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11648304705/job/32434575279 image --- .../vectorDotProductMPIFuncTests.cpp | 280 ------------------ .../include/vectorDotProductMPI.hpp | 36 --- .../vectorDotProductMPIPerfTests.cpp | 90 ------ .../src/vectorDotProductMPI.cpp | 122 -------- .../vectorDotProductSeqFuncTests.cpp | 144 --------- .../include/vectorDotProductSeq.hpp | 19 -- .../vectorDotProductSeqPerfTests.cpp | 70 ----- .../src/vectorDotProductSeq.cpp | 46 --- 8 files changed, 807 deletions(-) delete mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp delete mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp delete mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp delete mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp delete mode 100644 tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp delete mode 100644 tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp delete mode 100644 tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp delete mode 100644 tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp deleted file mode 100644 index 5beb915e059..00000000000 --- a/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp +++ /dev/null @@ -1,280 +0,0 @@ -#include - -#include - -#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" - -static int seedOffset = 0; - -std::vector GetRandomVector(int size) { - std::vector vector(size); - std::srand(static_cast(time(nullptr)) + ++seedOffset); - for (int i = 0; i < size; ++i) { - vector[i] = std::rand() % 100 + 1; - } - return vector; -} - -TEST(kudryashova_i_vector_dot_product_mpi, mpi_vectorDotProduct) { - std::vector vector1 = {8, 7, 6}; - std::vector vector2 = {3, 2, 1}; - ASSERT_EQ(44, kudryashova_i_vector_dot_product_mpi::vectorDotProduct(vector1, vector2)); -} - -TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_120) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 120; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - 
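// For reference, the scalar product these MPI tests cross-check reduces to a
// short loop; a minimal sketch with a hypothetical helper name, assuming
// equal-length int vectors and a widened accumulator to hedge against
// overflow (not the patch's own implementation, which appears later):
//
//   #include <cstdint>
//   #include <vector>
//
//   int64_t dot(const std::vector<int>& a, const std::vector<int>& b) {
//     int64_t acc = 0;  // widened so large inputs cannot overflow int
//     for (size_t i = 0; i < a.size(); ++i) acc += int64_t(a[i]) * b[i];
//     return acc;
//   }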
testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - std::vector reference(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); - taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); - taskDataSeq->outputs_count.emplace_back(reference.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); - ASSERT_EQ(reference[0], result[0]); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_360) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 360; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - std::vector reference(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); - taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); - taskDataSeq->outputs_count.emplace_back(reference.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); - ASSERT_EQ(reference[0], result[0]); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_equal) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 100; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 
0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); -} - -TEST(kudryashova_i_vector_dot_product_mpi, check_not_equal_vectors) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 100; - std::vector vector1 = GetRandomVector(count_size_vector + 1); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), false); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_dot_product) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 100; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_vectors) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::vector vector1 = {}; - std::vector vector2 = {}; - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), false); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_and_nonempty_vectors) { - boost::mpi::communicator world; 
- std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::vector vector1 = {}; - std::vector vector2 = {1}; - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), false); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_1_with_zero) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::vector vector1 = {0}; - std::vector vector2 = {1}; - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, scalar_multiply_vector_1) { - boost::mpi::communicator world; - std::vector> global_vector; - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - const int count_size_vector = 1; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - std::vector reference(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); - taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); - taskDataSeq->outputs_count.emplace_back(reference.size()); - kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - 
testMpiTaskSequential.post_processing(); - ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); - ASSERT_EQ(reference[0], result[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp b/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp deleted file mode 100644 index 4f85f4ee8a6..00000000000 --- a/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once -#include -#include -#include - -#include "core/task/include/task.hpp" -namespace kudryashova_i_vector_dot_product_mpi { -int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int result{}; -}; -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::vector local_input1_, local_input2_; - int result{}; - boost::mpi::communicator world; - unsigned int delta; -}; -} // namespace kudryashova_i_vector_dot_product_mpi diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp deleted file mode 100644 index a05b89e1e4e..00000000000 --- a/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp +++ /dev/null @@ -1,90 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" - -static int seedOffset = 0; -std::vector GetRandomVector(int size) { - std::vector vector(size); - std::srand(static_cast(time(nullptr)) + ++seedOffset); - for (int i = 0; i < size; ++i) { - vector[i] = std::rand() % 100 + 1; - } - return vector; -} - -TEST(kudryashova_i_vector_dot_product_mpi, test_pipeline_run) { - const int count = 15000000; - boost::mpi::communicator world; - std::vector> global_vector; - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count); - std::vector result(1, 0); - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return 
current_timer.elapsed(); }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); - } -} - -TEST(kudryashova_i_vector_dot_product_mpi, test_task_run) { - const int count_size_vector = 15000000; - boost::mpi::communicator world; - std::vector> global_vector; - std::vector vector1 = GetRandomVector(count_size_vector); - std::vector vector2 = GetRandomVector(count_size_vector); - std::vector result(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - global_vector = {vector1, vector2}; - for (size_t i = 0; i < global_vector.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); - } - taskDataPar->inputs_count.emplace_back(global_vector[0].size()); - taskDataPar->inputs_count.emplace_back(global_vector[1].size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataPar->outputs_count.emplace_back(result.size()); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); - } -} diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp deleted file mode 100644 index cf9e1f0c150..00000000000 --- a/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp +++ /dev/null @@ -1,122 +0,0 @@ -#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" - -#include -#include - -int kudryashova_i_vector_dot_product_mpi::vectorDotProduct(const std::vector& vector1, - const std::vector& vector2) { - long long result = 0; - for (unsigned long i = 0; i < vector1.size(); i++) result += vector1[i] * vector2[i]; - return result; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - - input_.resize(taskData->inputs.size()); - for (unsigned long i = 0; i < input_.size(); ++i) { - auto* tempPtr = reinterpret_cast(taskData->inputs[i]); - input_[i] = std::vector(taskData->inputs_count[i]); - std::copy(tempPtr, tempPtr + taskData->inputs_count[i], input_[i].begin()); - } - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return (taskData->inputs_count[0] == taskData->inputs_count[1]) && - (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && - taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && - taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && 
taskData->inputs_count[1] != 0; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::run() { - internal_order_test(); - for (unsigned long i = 0; i < input_[0].size(); i++) { - result += input_[1][i] * input_[0][i]; - } - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = result; - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - if (world.rank() == 0) { - delta = taskData->inputs_count[0] / world.size(); - if ((int)(taskData->inputs_count[0]) < world.size()) { - delta = taskData->inputs_count[0]; - } - } - if (world.rank() == 0) { - input_.resize(taskData->inputs.size()); - for (size_t i = 0; i < taskData->inputs.size(); ++i) { - if (taskData->inputs[i] == nullptr || taskData->inputs_count[i] == 0) { - return false; - } - input_[i].resize(taskData->inputs_count[i]); - int* source_ptr = reinterpret_cast(taskData->inputs[i]); - - std::copy(source_ptr, source_ptr + taskData->inputs_count[i], input_[i].begin()); - } - } - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - return (taskData->inputs_count[0] == taskData->inputs_count[1]) && - (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && - taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && - taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0; - } - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::run() { - internal_order_test(); - broadcast(world, delta, 0); - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); ++proc) { - world.send(proc, 0, input_[0].data() + proc * delta, delta); - world.send(proc, 1, input_[1].data() + proc * delta, delta); - } - } - local_input1_.resize(delta); - local_input2_.resize(delta); - if (world.rank() == 0) { - std::copy(input_[0].begin(), input_[0].begin() + delta, local_input1_.begin()); - std::copy(input_[1].begin(), input_[1].begin() + delta, local_input2_.begin()); - } else { - world.recv(0, 0, local_input1_.data(), delta); - world.recv(0, 1, local_input2_.data(), delta); - } - int local_result = std::inner_product(local_input1_.begin(), local_input1_.end(), local_input2_.begin(), 0); - std::vector full_results; - gather(world, local_result, full_results, 0); - - if (world.rank() == 0) { - result = std::accumulate(full_results.begin(), full_results.end(), 0); - } - if (world.rank() == 0 && (int)(taskData->inputs_count[0]) < world.size()) { - result = std::inner_product(input_[0].begin(), input_[0].end(), input_[1].begin(), 0); - } - return true; -} - -bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - if (!taskData->outputs.empty()) { - reinterpret_cast(taskData->outputs[0])[0] = result; - } else { - return false; - } - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp deleted file mode 100644 index 8565b68475e..00000000000 --- a/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp +++ /dev/null @@ 
-1,144 +0,0 @@ -#include - -#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp" - -static int seedOffset = 0; -std::vector GetRandomVector(int size) { - std::vector vector(size); - std::srand(static_cast(time(nullptr)) + ++seedOffset); - for (int i = 0; i < size; ++i) { - vector[i] = std::rand() % 100 + 1; - } - return vector; -} - -TEST(kudryashova_i_vector_dot_product_seq, check_vectorDotProduct) { - // Create data - std::vector vector1 = {1, 8, 14}; - std::vector vector2 = {3, 6, 5}; - ASSERT_EQ(121, kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2)); -} - -TEST(kudryashova_i_vector_dot_product_seq, scalar_multiply_vector_size_50) { - const int count = 50; - // Create data - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count); - std::vector out(1, 0); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); -} - -TEST(kudryashova_i_vector_dot_product_seq, scalar_multiply_vector_size_120) { - const int count = 120; - // Create data - std::vector out(1, 0); - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); -} - -TEST(kudryashova_i_vector_dot_product_seq, check_equal_vectors) { - const int count = 10; - // Create data - std::vector out(1, 0); - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); 
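// The negative tests that follow (mismatched sizes, empty inputs) all hinge
// on validation(); a condensed sketch of its checks, paraphrasing the
// implementation earlier in this patch (names abbreviated for illustration):
//
//   bool ok = inputs_count[0] == inputs_count[1]  // equal vector lengths
//          && inputs_count[0] != 0                // non-empty inputs
//          && inputs.size() == 2                  // exactly two input vectors
//          && outputs_count[0] == 1;              // one scalar result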
-} - -TEST(kudryashova_i_vector_dot_product_seq, checks_not_equal_vector) { - const int count = 10; - // Create data - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count + 1); - std::vector out(1, 0); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(kudryashova_i_vector_dot_product_seq, check_empty_vectors) { - // Create data - std::vector vector1 = {}; - std::vector vector2 = {}; - std::vector out(1, 0); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(kudryashova_i_vector_dot_product_seq, check_run) { - // Create data - std::vector out(1, 0); - std::vector vector1 = {1, 8, 14}; - std::vector vector2 = {3, 6, 5}; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - // Create Task - kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(121, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp b/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp deleted file mode 100644 index 7e7e7a07a02..00000000000 --- a/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once -#include - -#include "core/task/include/task.hpp" -namespace kudryashova_i_vector_dot_product { -int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_{}; - int result{}; -}; -} // namespace kudryashova_i_vector_dot_product diff --git 
a/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp deleted file mode 100644 index 6a6e3051f7c..00000000000 --- a/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include - -#include "core/perf/include/perf.hpp" -#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp" - -static int seedOffset = 0; -std::vector GetRandomVector(int size) { - std::vector vector(size); - std::srand(static_cast(time(nullptr)) + ++seedOffset); - for (int i = 0; i < size; ++i) { - vector[i] = std::rand() % 100 + 1; - } - return vector; -} - -TEST(kudryashova_i_vector_dot_product_seq, test_pipeline_run) { - const int count_size = 15000000; - std::vector vector1 = GetRandomVector(count_size); - std::vector vector2 = GetRandomVector(count_size); - std::vector out(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - auto testTaskSequential = std::make_shared(taskDataSeq); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); -} - -TEST(kudryashova_i_vector_dot_product_seq, test_task_run) { - const int count = 15000000; - std::vector vector1 = GetRandomVector(count); - std::vector vector2 = GetRandomVector(count); - std::vector out(1, 0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); - taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); - taskDataSeq->inputs_count.emplace_back(vector1.size()); - taskDataSeq->inputs_count.emplace_back(vector2.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - auto testTaskSequential = std::make_shared(taskDataSeq); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); -} \ No newline at end of file diff --git 
a/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp b/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp deleted file mode 100644 index f8d3c5cbfcc..00000000000 --- a/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp +++ /dev/null @@ -1,46 +0,0 @@ -#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp" - -#include - -int kudryashova_i_vector_dot_product::vectorDotProduct(const std::vector& vector1, - const std::vector& vector2) { - long long result = 0; - for (unsigned long i = 0; i < vector1.size(); ++i) { - result += vector1[i] * vector2[i]; - } - return result; -} - -bool kudryashova_i_vector_dot_product::TestTaskSequential::pre_processing() { - internal_order_test(); - - input_.resize(taskData->inputs.size()); - for (unsigned long i = 0; i < input_.size(); ++i) { - auto* tempPtr = reinterpret_cast(taskData->inputs[i]); - input_[i] = std::vector(taskData->inputs_count[i]); - std::copy(tempPtr, tempPtr + taskData->inputs_count[i], input_[i].begin()); - } - return true; -} - -bool kudryashova_i_vector_dot_product::TestTaskSequential::validation() { - internal_order_test(); - return (taskData->inputs_count[0] == taskData->inputs_count[1]) && - (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && - taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && - taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0; -} - -bool kudryashova_i_vector_dot_product::TestTaskSequential::run() { - internal_order_test(); - for (unsigned long i = 0; i < input_[0].size(); i++) { - result += input_[1][i] * input_[0][i]; - } - return true; -} - -bool kudryashova_i_vector_dot_product::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = result; - return true; -} \ No newline at end of file From 3a896e34f0cd97d89c9515999ac403cc5b533a9a Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Sun, 3 Nov 2024 18:34:29 +0800 Subject: [PATCH 059/155] =?UTF-8?q?Revert=20"=D0=9A=D0=BE=D1=80=D0=BE?= =?UTF-8?q?=D0=B2=D0=B8=D0=BD=20=D0=9D=D0=B8=D0=BA=D0=B8=D1=82=D0=B0.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2017.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B8=D0=BD=D0=B8=D0=BC?= =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87?= =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D1=80?= =?UTF-8?q?=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86?= =?UTF-8?q?=D1=8B."=20(#159)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#25 https://github.com/ChastovSlava/ppc-2024-autumn/actions/runs/11650359397/job/32438990922?pr=2 image image --- .../func_tests/main.cpp | 373 ------------------ .../include/ops_mpi.hpp | 48 --- .../perf_tests/main.cpp | 86 ---- .../src/ops_mpi.cpp | 182 --------- .../func_tests/main.cpp | 292 -------------- .../include/ops_seq.hpp | 28 -- .../perf_tests/main.cpp | 95 ----- .../src/ops_seq.cpp | 77 ---- 8 files changed, 1181 deletions(-) delete mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp delete mode 
100644 tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp deleted file mode 100644 index 1d1f282d360..00000000000 --- a/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" - -TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_10x10_matrix) { - boost::mpi::communicator world; - const int count_rows = 10; - const int count_columns = 10; - - std::vector> global_matrix; - std::vector global_min(count_rows, INT_MAX); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count = {count_rows, count_columns}; - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_min(count_rows, INT_MAX); - std::shared_ptr taskDataSeq = std::make_shared(); - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count = {count_rows, count_columns}; - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (int i = 0; i < count_rows; i++) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x100_matrix) { - boost::mpi::communicator world; - const int count_rows = 100; - const int count_columns = 100; - - std::vector> global_matrix; - std::vector global_min(count_rows, INT_MAX); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count = {count_rows, count_columns}; - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - 
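// These tests target per-row minima; a minimal sequential reference for a
// single row (editorial sketch with a hypothetical helper name, assuming
// non-empty rows as validation() requires):
//
//   #include <algorithm>
//   #include <vector>
//
//   int row_min(const std::vector<int>& row) {
//     return *std::min_element(row.begin(), row.end());
//   }
//
// Note: the ASSERT_EQ(global_min[i], INT_MIN) checks below suggest that
// generate_rnd_matrix plants INT_MIN in every row; that is inferred from the
// assertions and is not shown in this hunk.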
korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_min(count_rows, INT_MAX); - std::shared_ptr taskDataSeq = std::make_shared(); - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count = {count_rows, count_columns}; - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (int i = 0; i < count_rows; i++) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x500_matrix) { - boost::mpi::communicator world; - const int count_rows = 100; - const int count_columns = 500; - - std::vector> global_matrix; - std::vector global_min(count_rows, INT_MAX); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count = {count_rows, count_columns}; - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_min(count_rows, INT_MAX); - std::shared_ptr taskDataSeq = std::make_shared(); - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count = {count_rows, count_columns}; - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (int i = 0; i < count_rows; i++) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_5000x5000_matrix) { - boost::mpi::communicator world; - const int count_rows = 5000; - const int count_columns = 5000; - - std::vector> global_matrix; - std::vector global_min(count_rows, INT_MAX); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - 
taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count = {count_rows, count_columns}; - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_min(count_rows, INT_MAX); - std::shared_ptr taskDataSeq = std::make_shared(); - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count = {count_rows, count_columns}; - - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (int i = 0; i < count_rows; i++) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_input_empty_100x100_matrix) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_output_empty_100x100_matrix) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_less_two_cols_100x100_matrix) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - 
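For reference, each of the validation_* tests around this point knocks out exactly one clause of the predicate the tasks implement; quoting its shape from src/ops_mpi.cpp later in this patch (the sequential source uses the same expression):

return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
        (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
        (taskData->outputs_count[0] == taskData->inputs_count[0]));

Dropping the inputs, omitting the output buffer, passing a zero row or column count, or sizing the output differently from inputs_count[0] each falsifies one clause, which is what these tests assert.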
taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_0x10_matrix) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 0; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_10x10_cols_0_matrix) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 10; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(0); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(korovin_n_min_val_row_matrix_mpi, validation_fails_on_invalid_output_size) { - boost::mpi::communicator world; - if (world.rank() == 0) { - const int rows = 10; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows - 1, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp b/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp deleted file mode 100644 index 863a412fa35..00000000000 --- a/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace korovin_n_min_val_row_matrix_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit 
TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); - static std::vector> generate_rnd_matrix(int rows, int cols); - - private: - std::vector> input_; - std::vector res_; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::vector> local_input_; - std::vector res_; - boost::mpi::communicator world; -}; - -} // namespace korovin_n_min_val_row_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp deleted file mode 100644 index 435f0ecba63..00000000000 --- a/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" - -TEST(korovin_n_min_val_row_matrix_mpi, test_pipeline_run_min) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min; - - std::shared_ptr taskDataPar = std::make_shared(); - int count_rows; - int count_columns; - - if (world.rank() == 0) { - count_rows = 5000; - count_columns = 5000; - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - global_min.resize(count_rows, INT_MAX); - - for (auto& row : global_matrix) { - taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); - } - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - if (world.rank() == 0) { - for (size_t i = 0; i < global_min.size(); ++i) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} - -TEST(korovin_n_min_val_row_matrix_mpi_perf_test, test_task_run_min) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min; - - std::shared_ptr taskDataPar = std::make_shared(); - int count_rows; - int count_columns; - - if (world.rank() == 0) { - count_rows = 5000; - count_columns = 5000; - global_matrix = - korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); - global_min.resize(count_rows, INT_MAX); - - for (auto& row : global_matrix) { - taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); - } - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - 
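Unlike the sequential perf tests later in this patch, these two MPI "perf" tests execute the task once by hand and never attach the Perf harness. For comparison, a sketch of the MPI-side timing harness used by other tasks in this series (see the oturin perf tests further down), under the assumption that the same ppc::core::Perf API applies here:

auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
perfAttr->num_running = 10;
const boost::mpi::timer current_timer;
perfAttr->current_timer = [&] { return current_timer.elapsed(); };

auto perfResults = std::make_shared<ppc::core::PerfResults>();
auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
perfAnalyzer->pipeline_run(perfAttr, perfResults);
if (world.rank() == 0) {
  ppc::core::Perf::print_perf_statistic(perfResults);
}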
testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - if (world.rank() == 0) { - for (size_t i = 0; i < global_min.size(); ++i) { - ASSERT_EQ(global_min[i], INT_MIN); - } - } -} diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp deleted file mode 100644 index 832b8910d91..00000000000 --- a/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - - int rows = taskData->inputs_count[0]; - int cols = taskData->inputs_count[1]; - - input_.resize(rows, std::vector(cols)); - - for (int i = 0; i < rows; i++) { - int* input_matrix = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) { - input_[i][j] = input_matrix[j]; - } - } - res_.resize(rows); - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && - (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && - (taskData->outputs_count[0] == taskData->inputs_count[0])); -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - - for (size_t i = 0; i < input_.size(); i++) { - int min_val = input_[i][0]; - for (size_t j = 1; j < input_[i].size(); j++) { - if (input_[i][j] < min_val) { - min_val = input_[i][j]; - } - } - res_[i] = min_val; - } - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - - int* output_matrix = reinterpret_cast(taskData->outputs[0]); - for (size_t i = 0; i < res_.size(); i++) { - output_matrix[i] = res_[i]; - } - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - int rows = 0; - int cols = 0; - - if (world.rank() == 0) { - rows = taskData->inputs_count[0]; - cols = taskData->inputs_count[1]; - } - - broadcast(world, rows, 0); - broadcast(world, cols, 0); - - int delta = rows / world.size(); - int extra = rows % world.size(); - - if (world.rank() == 0) { - input_.resize(rows, std::vector(cols)); - for (int i = 0; i < rows; i++) { - int* input_matrix = reinterpret_cast(taskData->inputs[i]); - input_[i].assign(input_matrix, input_matrix + cols); - } - - for (int proc = 1; proc < world.size(); proc++) { - int start_row = proc * delta + std::min(proc, extra); - int num_rows = delta + (proc < extra ? 1 : 0); - for (int r = start_row; r < start_row + num_rows; r++) { - world.send(proc, 0, input_[r].data(), cols); - } - } - } - - int local_rows = delta + (world.rank() < extra ? 
1 : 0); - - local_input_.resize(local_rows, std::vector(cols)); - - if (world.rank() == 0) { - std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin()); - } else { - for (int r = 0; r < local_rows; r++) { - world.recv(0, 0, local_input_[r].data(), cols); - } - } - - res_.resize(rows); - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - - if (world.rank() == 0) { - return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && - (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && - (taskData->outputs_count[0] == taskData->inputs_count[0])); - } - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - std::vector local_mins(local_input_.size(), INT_MAX); - for (size_t i = 0; i < local_input_.size(); i++) { - for (const auto& val : local_input_[i]) { - local_mins[i] = std::min(local_mins[i], val); - } - } - - if (world.rank() == 0) { - int current_ind = 0; - std::copy(local_mins.begin(), local_mins.end(), res_.begin()); - current_ind += local_mins.size(); - for (int proc = 1; proc < world.size(); proc++) { - int loc_size; - world.recv(proc, 0, &loc_size, 1); - std::vector loc_res_(loc_size); - world.recv(proc, 0, loc_res_.data(), loc_size); - copy(loc_res_.begin(), loc_res_.end(), res_.data() + current_ind); - current_ind += loc_res_.size(); - } - } else { - int loc_res__size = (int)local_mins.size(); - world.send(0, 0, &loc_res__size, 1); - world.send(0, 0, local_mins.data(), loc_res__size); - } - return true; -} - -bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - - if (world.rank() == 0) { - int* output_matrix = reinterpret_cast(taskData->outputs[0]); - std::copy(res_.begin(), res_.end(), output_matrix); - } - - return true; -} - -std::vector korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_vector(int size, int lower_bound, - int upper_bound) { - std::vector v1(size); - for (auto& num : v1) { - num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); - } - return v1; -} - -std::vector> korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(int rows, - int cols) { - std::vector> matrix1(rows, std::vector(cols)); - for (auto& row : matrix1) { - row = generate_rnd_vector(cols, -1000, 1000); - int rnd_index = std::rand() % cols; - row[rnd_index] = INT_MIN; - } - return matrix1; -} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp deleted file mode 100644 index e40afea053b..00000000000 --- a/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" - -TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_10x10_matrix) { - const int rows = 10; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - 
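The assertions a few lines below compare every per-row result against INT_MIN; that works because generate_rnd_matrix (shown just above) plants INT_MIN at one random column of each row after filling it with random values, so the expected minimum of every row is known regardless of the random fill. The two key lines, condensed:

row = generate_rnd_vector(cols, -1000, 1000);  // fill the row with values in [-1000, 1000]
row[std::rand() % cols] = INT_MIN;             // plant a guaranteed minimum somewhere in the row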
std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} - -TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x100_matrix) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} - -TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x500_matrix) { - const int rows = 100; - const int cols = 500; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} - -TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_5000x5000_matrix) { - const int rows = 5000; - const int cols = 5000; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} - -TEST(korovin_n_min_val_row_matrix_seq, 
validation_input_empty_100x100_matrix) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_output_empty_100x100_matrix) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_100x100_matrix) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_cols_100x100_matrix) { - const int rows = 100; - const int cols = 100; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_0x10_matrix) { - const int rows = 0; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector 
v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_10x10_cols_0_matrix) { - const int rows = 10; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(0); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(korovin_n_min_val_row_matrix_seq, validation_fails_on_invalid_output_size) { - const int rows = 10; - const int cols = 10; - - std::shared_ptr taskDataSeq = std::make_shared(); - - korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows - 1, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - ASSERT_EQ(testTaskSequential.validation(), false); -} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp b/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp deleted file mode 100644 index 49ce7c430a7..00000000000 --- a/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace korovin_n_min_val_row_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) { - std::srand(std::time(nullptr)); - } - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); - static std::vector> generate_rnd_matrix(int rows, int cols); - - private: - std::vector> input_; - std::vector res_; -}; - -} // namespace korovin_n_min_val_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp deleted file mode 100644 index 301f425150c..00000000000 --- a/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" - -TEST(korovin_n_min_val_row_matrix_seq, test_pipeline_run) { - const int rows = 5000; - const int cols = 5000; - - std::shared_ptr taskDataSeq = std::make_shared(); - auto testTaskSequential = 
std::make_shared(taskDataSeq); - - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; // Set the number of runs as needed - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} - -TEST(korovin_n_min_val_row_matrix_seq, test_task_run) { - const int rows = 5000; - const int cols = 5000; - - std::shared_ptr taskDataSeq = std::make_shared(); - auto testTaskSequential = std::make_shared(taskDataSeq); - - std::vector> matrix_rnd = - korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); - - for (auto& row : matrix_rnd) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - } - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector v_res(rows, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); - taskDataSeq->outputs_count.emplace_back(v_res.size()); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - for (int i = 0; i < rows; i++) { - ASSERT_EQ(v_res[i], INT_MIN); - } -} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp b/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp deleted file mode 100644 index 7d80ba6374c..00000000000 --- a/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" - -#include - -using namespace std::chrono_literals; - -bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - - int rows = taskData->inputs_count[0]; - int cols = taskData->inputs_count[1]; - - input_.resize(rows, std::vector(cols)); - - for (int i = 0; i < rows; i++) { - int* input_matrix = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) { - input_[i][j] = input_matrix[j]; - } - } - 
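A side note on the two perf tests above: their timer lambda converts a high_resolution_clock delta from nanoseconds to seconds by hand. An equivalent, slightly tidier form — assuming, as the usage implies, that PerfAttr::current_timer is a std::function<double()> returning seconds:

perfAttr->current_timer = [t0 = std::chrono::high_resolution_clock::now()] {
  return std::chrono::duration<double>(std::chrono::high_resolution_clock::now() - t0).count();
};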
res_.resize(rows);
-  return true;
-}
-
-bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::validation() {
-  internal_order_test();
-
-  return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
-          (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
-          (taskData->outputs_count[0] == taskData->inputs_count[0]));
-}
-
-bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::run() {
-  internal_order_test();
-
-  for (size_t i = 0; i < input_.size(); i++) {
-    int min_val = input_[i][0];
-    for (size_t j = 1; j < input_[i].size(); j++) {
-      if (input_[i][j] < min_val) {
-        min_val = input_[i][j];
-      }
-    }
-    res_[i] = min_val;
-  }
-  return true;
-}
-
-bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::post_processing() {
-  internal_order_test();
-
-  int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
-  for (size_t i = 0; i < res_.size(); i++) {
-    output_matrix[i] = res_[i];
-  }
-  return true;
-}
-
-std::vector<int> korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_vector(int size, int lower_bound,
-                                                                                           int upper_bound) {
-  std::vector<int> v1(size);
-  for (auto& num : v1) {
-    num = lower_bound + std::rand() % (upper_bound - lower_bound + 1);
-  }
-  return v1;
-}
-
-std::vector<std::vector<int>> korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(int rows,
-                                                                                                        int cols) {
-  std::vector<std::vector<int>> matrix1(rows, std::vector<int>(cols));
-  for (auto& row : matrix1) {
-    row = generate_rnd_vector(cols, -1000, 1000);
-    int rnd_index = std::rand() % cols;
-    row[rnd_index] = INT_MIN;
-  }
-  return matrix1;
-}
\ No newline at end of file

From 1a5f4e703d76c3cf97a4c05e785c193a82f3aa51 Mon Sep 17 00:00:00 2001
From: ovVrLFg8ks <122876910+ovVrLFg8ks@users.noreply.github.com>
Date: Sun, 3 Nov 2024 16:43:08 +0300
Subject: [PATCH 060/155] Oturin Alexander. Task 1. Variant 15. Finding the
 maximum values in the rows of a matrix. (#75)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Description of the sequential task:**
For each of the m rows the maximum is computed and written into a vector of maxima of size m.

**Description of the MPI task:**
Every process receives the width and height of the matrix (n and m respectively); the rank-0 process additionally receives the matrix as a one-dimensional vector and a vector for the results.
Then, in the rank-0 process: based on the number of nonzero-rank processes, a loop is started in which every process that is about to be sent a row of size n is first sent a signal, and then the row itself. Later in the same loop the rank-0 process receives the results back from the nonzero-rank processes. If in a given iteration there are more processes than remaining rows, those processes are *for the time being* sent nothing. After the loop, every process is sent a shutdown signal.
In the nonzero-rank processes: a loop is started that first checks for the exit signal; if the signal is not an exit, the process receives a row, computes its maximum, and sends it to the rank-0 process.

*The previous pull request was cancelled because it changed 56 files...*
---
 .../func_tests/main.cpp                       | 326 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  58 ++++
 .../perf_tests/main.cpp                       | 106 ++++++
 .../src/ops_mpi.cpp                           | 151 ++++++++
 .../func_tests/main.cpp                       | 122 +++++++
 .../include/ops_seq.hpp                       |  25 ++
 .../perf_tests/main.cpp                       |  84 +++++
 .../src/ops_seq.cpp                           |  43 +++
 8 files changed, 915 insertions(+)
 create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..f04d61b8035
--- /dev/null
+++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp
@@ -0,0 +1,326 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp"
+
+std::vector<int> oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+// squarelike
+TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_1) {
+  size_t n = 5;
+  size_t m = 5;
+
+  boost::mpi::communicator world;
+
+  std::vector<int> global_mat;
+  std::vector<int> global_max(m, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  taskDataPar->inputs_count.emplace_back(n);
+  taskDataPar->inputs_count.emplace_back(m);
+  if (world.rank() == 0) {
+    global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_max(m, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
+    taskDataSeq->inputs_count.emplace_back(n);
+    taskDataSeq->inputs_count.emplace_back(m);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_max.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_max.size());
+
+    // Create Task
+    oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    for (size_t i = 0; i < global_max.size(); i++) {
+      ASSERT_EQ(reference_max[i], global_max[i]);
+    }
+  }
+}
+
+// rectangular
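Before the rectangular-shape variants that follow, a compressed, self-contained view of the master/satellite protocol the commit message describes; the full implementation is in src/ops_mpi.cpp further down. The helper function row_max_master_worker is hypothetical, written for this note only; the tags, flag values, and control flow mirror the patch:

#include <algorithm>
#include <boost/mpi/communicator.hpp>
#include <vector>

void row_max_master_worker(const boost::mpi::communicator &world, std::vector<int> &mat,
                           std::vector<int> &maxes, int n, int m) {
  const int TAG_EXIT = 1, TAG_TOBASE = 2, TAG_TOSAT = 3;  // same tags as src/ops_mpi.cpp
  if (world.rank() == 0) {
    int satellites = world.size() - 1, go = 1, stop = 0, row = 0;
    while (row < m) {
      int batch = std::min(satellites, m - row);  // ranks beyond the remaining rows get nothing
      for (int i = 0; i < batch; i++) {
        world.send(i + 1, TAG_EXIT, &go, 1);                   // signal: a row follows
        world.send(i + 1, TAG_TOSAT, &mat[(row + i) * n], n);  // the row itself
      }
      for (int i = 0; i < batch; i++) {
        world.recv(i + 1, TAG_TOBASE, &maxes[row + i], 1);     // its maximum comes back
      }
      row += satellites;
    }
    for (int i = 0; i < satellites; i++) world.send(i + 1, TAG_EXIT, &stop, 1);  // shut everyone down
  } else {
    std::vector<int> row(n);
    int flag = 0;
    while (true) {
      world.recv(0, TAG_EXIT, &flag, 1);
      if (flag == 0) break;
      world.recv(0, TAG_TOSAT, row.data(), n);
      int mx = *std::max_element(row.begin(), row.end());  // the patch uses an equivalent manual loop
      world.send(0, TAG_TOBASE, &mx, 1);
    }
  }
}

Note that the base loop advances by `satellites` rows per batch, so the scheme relies on world.size() > 1; the MSVC-only branch at the top of run() in the patch handles the single-process case separately. Unlike the korovin patch above, which precomputes contiguous blocks per rank, this base hands out one row per satellite per batch.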
+TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_2) { + size_t n = 10; + size_t m = 15; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_3) { + size_t n = 15; + size_t m = 10; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { 
+ ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_4) { + size_t n = 1; + size_t m = 15; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_5) { + size_t n = 15; + size_t m = 1; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + 
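Worth spelling out once: all of these tests store the n×m matrix as a flat vector in row-major order, so row i occupies the half-open range [i*n, (i+1)*n) — this is also what the ASCII diagram in the header below depicts. The sequential run() reduces each such range with a single call, quoted from src/ops_mpi.cpp:

for (size_t i = 0; i < m; i++) {
  res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n);
}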
testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_EMPTY) { + size_t n = 0; + size_t m = 0; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..a4c5cbf561d --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,58 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace oturin_a_max_values_by_rows_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + size_t n = 0; + size_t m = 0; + std::vector input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + /* + m maxes: + ^ + | -9 99 : 99 + | 12 06 : 12 + +------> n + */ + size_t n = 0; + size_t m = 0; + std::vector input_, local_input_; + std::vector res; + + boost::mpi::communicator world; +}; + +} // namespace oturin_a_max_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp 
b/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b5ed07d8a2f --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,106 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" + +std::vector oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_pipeline_run) { + size_t n = 300; + size_t m = 300; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((int)(n * m), global_max[0]); + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_task_run) { + size_t n = 300; + size_t m = 300; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((int)(n * m), 
global_max[0]); + } +} diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..2e8f40c19d4 --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp @@ -0,0 +1,151 @@ +#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(tmp_ptr, tmp_ptr + n * m); + // Init values for output + res = std::vector(m, 0); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check elements count in i/o + // m & maxes: + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} +//////////////////////////////////////////////////////////////////////////////////////// + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[1] == taskData->outputs_count[0]; + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + + if (world.rank() == 0) { + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(tmp_ptr, tmp_ptr + n * m); + // Init values for output + res = std::vector(m, 0); + } + + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + const int TAG_EXIT = 1; + const int TAG_TOBASE = 2; + const int TAG_TOSAT = 3; + +#if defined(_MSC_VER) && !defined(__clang__) + if (world.size() == 1) { + for (size_t i = 0; i < m; i++) { + res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + } + return true; + } +#endif + + if (world.rank() == 0) { // base + size_t satellites = world.size() - 1; + + int proc_exit = 0; + int proc_wait = 1; + + if (m == 0 || n == 0) { + for (size_t i = 0; i < satellites; i++) { + world.send(i + 1, TAG_EXIT, &proc_exit, 1); + } + return true; + } + + int *arr = new int[m * n]; + int *maxes = new int[m]; + + std::copy(input_.begin(), input_.end(), arr); + + size_t row = 0; + while (row < m) { + for (size_t i = 0; i < std::min(satellites, m - row); i++) { + world.send(i + 1, TAG_EXIT, &proc_wait, 1); + world.send(i + 1, TAG_TOSAT, &arr[(row + i) * n], n); + } + + for (size_t i = 0; i < std::min(satellites, m - row); i++) { + world.recv(i + 1, TAG_TOBASE, &maxes[row + i], 1); + } + row += satellites; + } + for (size_t i = 0; i < satellites; i++) // close all satellite processes + 
world.send(i + 1, TAG_EXIT, &proc_exit, 1); + + res.assign(maxes, maxes + m); + + delete[] arr; + delete[] maxes; + } else { // satelleite + int *arr = new int[n]; + int proc_exit; + while (true) { + int out = INT_MIN; + world.recv(0, TAG_EXIT, &proc_exit, 1); + if (proc_exit == 0) break; + + world.recv(0, TAG_TOSAT, arr, n); + + for (size_t i = 0; i < n; i++) out = std::max(arr[i], out); + + world.send(0, TAG_TOBASE, &out, 1); + } + delete[] arr; + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..886af56a69d --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,122 @@ +#include + +#include +#include + +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_5) { + size_t n = 5; + size_t m = 5; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_10_5) { + size_t n = 10; + size_t m = 5; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_10) { + size_t n = 5; + size_t m = 10; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + 
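A quick sanity note on the expected values these sequential tests build: std::iota fills the matrix with 1..n*m in row-major order, so the largest element of row i is its last one, and the reference vector follows directly:

std::iota(std::begin(in), std::end(in), 1);             // row i holds i*n + 1 .. (i + 1)*n
for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n;  // e.g. n = 5, m = 5 -> {5, 10, 15, 20, 25}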
taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_EMPTY) { + size_t n = 0; + size_t m = 0; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..ed042671b44 --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace oturin_a_max_values_by_rows_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + size_t n = 0; + size_t m = 0; + std::vector input_; + std::vector res; +}; + +} // namespace oturin_a_max_values_by_rows_matrix_seq diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..84af1763195 --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,84 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_pipeline_run) { + size_t n = 500; + size_t m = 500; + + // Create data + std::vector in(n * m, 0); + std::vector out(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = 
std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(m, taskDataSeq->outputs_count.back()); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_task_run) { + size_t n = 500; + size_t m = 500; + + // Create data + std::vector in(n * m, 0); + std::vector out(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(m, taskDataSeq->outputs_count.back()); +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..588586376cf --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,43 @@ +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < n * m; i++) { + input_[i] = tmp_ptr[i]; + } + // Init values for output + res = std::vector(m, 0); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check elements count in i/o + // m & maxes: + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < m; i++) res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} From be31eac473daee6f2edf831579867ff55439a35a Mon Sep 17 00:00:00 2001 From: lizardwizardd <74024127+lizardwizardd@users.noreply.github.com> Date: Sun, 3 Nov 2024 17:03:51 +0300 
Subject: [PATCH 061/155] =?UTF-8?q?=D0=9C=D0=B8=D0=BB=D0=BE=D0=B2=D0=B0?=
 =?UTF-8?q?=D0=BD=D0=BA=D0=B8=D0=BD=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC?=
 =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?=
 =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=201.=20=D0=A1=D1=83=D0=BC=D0=BC?=
 =?UTF-8?q?=D0=B0=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?=
 =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#62)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

_**Sequential task**_
Iterate over the elements of the vector and accumulate their sum.

**_Parallel task_**
The vector is split between the processes into equal parts. If the number of
elements N is not evenly divisible by the number of processes P, the first
`N % P` processes each handle one extra element. The data is distributed to
the processes with `scatterv`, each process sums its own part with
`accumulate`, and the resulting per-process sums are added together with
`reduce`.
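As a reference for the distribution scheme above, here is a minimal
self-contained Boost.MPI sketch (illustrative only, not part of the patch;
the file name `block_sum.cpp` and the hard-coded element count are made up):

```cpp
// block_sum.cpp -- sketch of the N % P split plus scatterv/accumulate/reduce.
#include <boost/mpi.hpp>

#include <cstdint>
#include <cstdio>
#include <numeric>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  const int p = world.size();
  const int n = 10;  // example element count

  std::vector<int> data;
  if (world.rank() == 0) data.assign(n, 1);  // only the root holds the input

  // Equal parts; the first n % p processes take one extra element.
  std::vector<int> counts(p, n / p);
  for (int i = 0; i < n % p; ++i) counts[i]++;
  std::vector<int> displs(p, 0);
  for (int i = 1; i < p; ++i) displs[i] = displs[i - 1] + counts[i - 1];

  // Distribute, sum locally, then combine the partial sums at the root.
  std::vector<int> local(counts[world.rank()]);
  boost::mpi::scatterv(world, data.data(), counts, displs, local.data(),
                       static_cast<int>(local.size()), 0);
  int64_t local_sum = std::accumulate(local.begin(), local.end(), int64_t(0));
  int64_t total = 0;
  boost::mpi::reduce(world, local_sum, total, std::plus<int64_t>(), 0);

  if (world.rank() == 0) std::printf("sum = %lld\n", static_cast<long long>(total));
  return 0;
}
```

For N = 10 elements on P = 4 processes this yields counts {3, 3, 2, 2} and
displacements {0, 3, 6, 8}, which is exactly the layout the task's
`VectorSumPar::run()` below builds.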
---
 .../func_tests/main.cpp                       | 121 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  37 ++++++
 .../perf_tests/main.cpp                       |  69 ++++++++++
 .../src/ops_mpi.cpp                           | 103 +++++++++++++++
 .../func_tests/main.cpp                       | 111 ++++++++++++++++
 .../include/ops_seq.hpp                       |  22 ++++
 .../perf_tests/main.cpp                       |  79 ++++++++++++
 .../src/ops_seq.cpp                           |  39 ++++++
 8 files changed, 581 insertions(+)
 create mode 100644 tasks/mpi/milovankin_m_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/milovankin_m_sum_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/milovankin_m_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/milovankin_m_sum_of_vector_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/milovankin_m_sum_of_vector_elements/func_tests/main.cpp b/tasks/mpi/milovankin_m_sum_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..68c0cbae329
--- /dev/null
+++ b/tasks/mpi/milovankin_m_sum_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,121 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <cstdlib>
+#include <numeric>
+#include <vector>
+
+#include "mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp"
+
+namespace milovankin_m_sum_of_vector_elements_parallel {
+[[nodiscard]] std::vector<int32_t> make_random_vector(int32_t size, int32_t val_min, int32_t val_max) {
+  std::vector<int32_t> new_vector(size);
+
+  for (int32_t i = 0; i < size; i++) {
+    new_vector[i] = rand() % (val_max - val_min + 1) + val_min;
+  }
+
+  return new_vector;
+}
+}  // namespace milovankin_m_sum_of_vector_elements_parallel
+
+void run_parallel_and_sequential_tasks(std::vector<int32_t> &input_vector, int64_t expected_sum) {
+  boost::mpi::communicator world;
+  std::vector<int64_t> result_parallel(1, 0);
+  std::vector<int64_t> result_sequential(1, 0);
+
+  // Task data parallel
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vector.data()));
+    taskDataPar->inputs_count.emplace_back(input_vector.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
+    taskDataPar->outputs_count.emplace_back(result_parallel.size());
+  }
+
+  // Parallel
+  milovankin_m_sum_of_vector_elements_parallel::VectorSumPar testMpiTaskParallel(taskDataPar);
+  testMpiTaskParallel.validation();
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Task data sequential
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vector.data()));
+    taskDataSeq->inputs_count.emplace_back(input_vector.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_sequential.data()));
+    taskDataSeq->outputs_count.emplace_back(result_sequential.size());
+
+    // Sequential
+    milovankin_m_sum_of_vector_elements_parallel::VectorSumSeq testMpiTaskSequential(taskDataSeq);
+    testMpiTaskSequential.validation();
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    // Assert
+    ASSERT_EQ(result_sequential[0], result_parallel[0]);
+    ASSERT_EQ(result_sequential[0], expected_sum);
+  }
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, randomVector50000) {
+  boost::mpi::communicator world;
+  std::vector<int32_t> input_vector;
+
+  if (world.rank() == 0) {
+    input_vector = milovankin_m_sum_of_vector_elements_parallel::make_random_vector(50000, -500, 5000);
+  }
+
+  // expected_sum is only meaningful (and only checked) on rank 0, where the vector was generated
+  run_parallel_and_sequential_tasks(input_vector, std::accumulate(input_vector.begin(), input_vector.end(), 0));
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, regularVector) {
+  std::vector<int32_t> input_vector = {1, 2, 3, -5, 3, 43};
+  run_parallel_and_sequential_tasks(input_vector, 47);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, positiveNumbers) {
+  std::vector<int32_t> input_vector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  run_parallel_and_sequential_tasks(input_vector, 55);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, negativeNumbers) {
+  std::vector<int32_t> input_vector = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10};
+  run_parallel_and_sequential_tasks(input_vector, -55);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, zeroVector) {
+  std::vector<int32_t> input_vector = {0, 0, 0, 0, 0};
+  run_parallel_and_sequential_tasks(input_vector, 0);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, tinyVector) {
+  std::vector<int32_t> input_vector = {4, -20};
+  run_parallel_and_sequential_tasks(input_vector, -16);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, emptyVector) {
+  std::vector<int32_t> input_vector = {};
+  run_parallel_and_sequential_tasks(input_vector, 0);
+}
+
+TEST(milovankin_m_sum_of_vector_elements_mpi, validationNotPassed) {
+  boost::mpi::communicator world;
+
+  std::vector<int32_t> input = {1, 2, 3, -5};
+
+  std::shared_ptr<ppc::core::TaskData> taskData = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskData->inputs_count.emplace_back(input.size());
+    taskData->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    // Omitting output setup to cause validation to fail
+  }
+
+  milovankin_m_sum_of_vector_elements_parallel::VectorSumPar vectorSumPar(taskData);
+  if (world.rank() == 0) {
+    ASSERT_FALSE(vectorSumPar.validation());
+  }
+}
diff --git a/tasks/mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp
new file mode 100644
index 00000000000..a59d105d802
--- /dev/null
+++ b/tasks/mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp
@@ -0,0 +1,37 @@
+#pragma once
+
+#include <boost/mpi/communicator.hpp>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace milovankin_m_sum_of_vector_elements_parallel {
+// No changes to seq version
+class VectorSumSeq : public ppc::core::Task {
+ public:
+  explicit VectorSumSeq(std::shared_ptr<ppc::core::TaskData> taskData_) :
Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int64_t sum_ = 0; +}; + +class VectorSumPar : public ppc::core::Task { + public: + explicit VectorSumPar(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int64_t sum_ = 0; + boost::mpi::communicator world; +}; + +} // namespace milovankin_m_sum_of_vector_elements_parallel diff --git a/tasks/mpi/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d43226d9069 --- /dev/null +++ b/tasks/mpi/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,69 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp" + +TEST(milovankin_m_sum_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector input_vector; + std::vector result_parallel(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + int vector_size = 50'000'000; + + if (world.rank() == 0) { + input_vector.resize(vector_size, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataPar->inputs_count.emplace_back(input_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result_parallel.data())); + taskDataPar->outputs_count.emplace_back(result_parallel.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(vector_size, result_parallel[0]); + } +} + +TEST(milovankin_m_sum_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector input_vector; + std::vector result_parallel(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + int vector_size = 50'000'000; + + if (world.rank() == 0) { + input_vector = std::vector(vector_size, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataPar->inputs_count.emplace_back(input_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result_parallel.data())); + taskDataPar->outputs_count.emplace_back(result_parallel.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(vector_size, result_parallel[0]); + } +} diff --git a/tasks/mpi/milovankin_m_sum_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/milovankin_m_sum_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 
00000000000..501c485f903 --- /dev/null +++ b/tasks/mpi/milovankin_m_sum_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,103 @@ +#include "mpi/milovankin_m_sum_of_vector_elements/include/ops_mpi.hpp" + +namespace milovankin_m_sum_of_vector_elements_parallel { +// +// Sequential version +// +bool VectorSumSeq::validation() { + internal_order_test(); + + return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +} + +bool VectorSumSeq::pre_processing() { + internal_order_test(); + + // Fill input vector from taskData + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input_.resize(taskData->inputs_count[0]); + std::copy(input_ptr, input_ptr + taskData->inputs_count[0], input_.begin()); + + return true; +} + +bool VectorSumSeq::run() { + internal_order_test(); + + sum_ = 0; + for (int32_t num : input_) { + sum_ += num; + } + + return true; +} + +bool VectorSumSeq::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = sum_; + return true; +} + +// +// Parallel version +// + +bool VectorSumPar::validation() { + internal_order_test(); + + return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +} + +bool VectorSumPar::pre_processing() { + internal_order_test(); + sum_ = 0; + + return true; +} + +bool VectorSumPar::run() { + internal_order_test(); + + int my_rank = world.rank(); + int world_size = world.size(); + int total_size = 0; + + // Fill input vector from taskData + if (my_rank == 0) { + total_size = taskData->inputs_count[0]; + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(input_ptr, input_ptr + total_size); + } + + boost::mpi::broadcast(world, total_size, 0); + + // Create vectors for scatterv + int local_size = total_size / world_size + (my_rank < (total_size % world_size) ? 
1 : 0); + std::vector send_counts(world_size, total_size / world_size); + std::vector offsets(world_size, 0); + + // Handle the case when total_size is not divisible by world_size + for (int i = 0; i < total_size % world_size; ++i) send_counts[i]++; + for (int i = 1; i < world_size; ++i) offsets[i] = offsets[i - 1] + send_counts[i - 1]; + + // Scatter data to local vectors + local_input_.resize(send_counts[my_rank]); + boost::mpi::scatterv(world, input_.data(), send_counts, offsets, local_input_.data(), local_size, 0); + + int64_t local_sum = std::accumulate(local_input_.begin(), local_input_.end(), int64_t(0)); + boost::mpi::reduce(world, local_sum, sum_, std::plus<>(), 0); + + return true; +} + +bool VectorSumPar::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = sum_; + } + + return true; +} + +} // namespace milovankin_m_sum_of_vector_elements_parallel diff --git a/tasks/seq/milovankin_m_sum_of_vector_elements/func_tests/main.cpp b/tasks/seq/milovankin_m_sum_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..dcac10ba21d --- /dev/null +++ b/tasks/seq/milovankin_m_sum_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,111 @@ +#include + +#include + +#include "seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp" + +TEST(milovankin_m_sum_of_vector_elements, regularVector) { + std::vector input = {1, 2, 3, -5, 3, 43}; + int64_t expected = 47; + int64_t actual = 0; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_TRUE(vectorSumSeq.validation()); + vectorSumSeq.pre_processing(); + vectorSumSeq.run(); + vectorSumSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(milovankin_m_sum_of_vector_elements, positiveNumbers) { + std::vector input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + int64_t expected = 55; + int64_t actual = 0; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_TRUE(vectorSumSeq.validation()); + vectorSumSeq.pre_processing(); + vectorSumSeq.run(); + vectorSumSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(milovankin_m_sum_of_vector_elements, negativeNumbers) { + std::vector input = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}; + int64_t expected = -55; + int64_t actual = 0; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_TRUE(vectorSumSeq.validation()); + vectorSumSeq.pre_processing(); + vectorSumSeq.run(); + vectorSumSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(milovankin_m_sum_of_vector_elements, zeroVector) { + std::vector input = {0, 0, 0, 0}; + int64_t expected = 0; + int64_t actual = 0; + + std::shared_ptr 
taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_TRUE(vectorSumSeq.validation()); + vectorSumSeq.pre_processing(); + vectorSumSeq.run(); + vectorSumSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(milovankin_m_sum_of_vector_elements, emptyVector) { + std::vector input = {}; + int64_t expected = 0; + int64_t actual = 0; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_TRUE(vectorSumSeq.validation()); + vectorSumSeq.pre_processing(); + vectorSumSeq.run(); + vectorSumSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(milovankin_m_sum_of_vector_elements, validationNotPassed) { + std::vector input = {1, 2, 3, -5}; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + + milovankin_m_sum_of_vector_elements_seq::VectorSumSeq vectorSumSeq(taskData); + ASSERT_FALSE(vectorSumSeq.validation()); +} diff --git a/tasks/seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp b/tasks/seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..e27fbb454f4 --- /dev/null +++ b/tasks/seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace milovankin_m_sum_of_vector_elements_seq { + +class VectorSumSeq : public ppc::core::Task { + public: + explicit VectorSumSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int64_t sum_ = 0; +}; + +} // namespace milovankin_m_sum_of_vector_elements_seq diff --git a/tasks/seq/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp b/tasks/seq/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..f9553e6c61b --- /dev/null +++ b/tasks/seq/milovankin_m_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,79 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp" + +TEST(milovankin_m_sum_of_vector_elements_seq, test_pipeline_run) { + // Create data + const int32_t vec_size = 50'000'000; + std::vector input_data(vec_size, 1); + auto expected_sum = static_cast(vec_size); + int64_t actual_sum = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskDataSeq->inputs_count.emplace_back(input_data.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&actual_sum)); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto vectorSumSequential = std::make_shared(taskDataSeq); + + // Set up Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = 
std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Initialize perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer and run + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_sum, actual_sum); +} + +TEST(milovankin_m_sum_of_vector_elements_seq, test_task_run) { + const int32_t count = 50'000'000; + std::vector input_data(count, 1); + auto expected_sum = static_cast(count); + int64_t actual_sum = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskDataSeq->inputs_count.emplace_back(input_data.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&actual_sum)); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto vectorSumSequential = std::make_shared(taskDataSeq); + + // Set up Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Initialize perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer and run + auto perfAnalyzer = std::make_shared(vectorSumSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_sum, actual_sum); +} diff --git a/tasks/seq/milovankin_m_sum_of_vector_elements/src/ops_seq.cpp b/tasks/seq/milovankin_m_sum_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..0f086a6eb9c --- /dev/null +++ b/tasks/seq/milovankin_m_sum_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,39 @@ +#include "seq/milovankin_m_sum_of_vector_elements/include/ops_seq.hpp" + +namespace milovankin_m_sum_of_vector_elements_seq { + +bool VectorSumSeq::validation() { + internal_order_test(); + + return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +} + +bool VectorSumSeq::pre_processing() { + internal_order_test(); + + // Fill input vector from taskData + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input_.resize(taskData->inputs_count[0]); + std::copy(input_ptr, input_ptr + taskData->inputs_count[0], input_.begin()); + + return true; +} + +bool VectorSumSeq::run() { + internal_order_test(); + + sum_ = 0; + for (int32_t num : input_) { + sum_ += num; + } + + return true; +} + +bool VectorSumSeq::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = sum_; + return true; +} + +} // namespace milovankin_m_sum_of_vector_elements_seq From 3c7a36069e7ad7f52c3c8dbf574f71b158dc1f41 Mon Sep 17 00:00:00 2001 From: MaximChizhov <113035533+MaximChizhov@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:40:24 +0300 Subject: [PATCH 062/155] =?UTF-8?q?=D0=A7=D0=B8=D0=B6=D0=BE=D0=B2=20=D0=9C?= =?UTF-8?q?=D0=B0=D0=BA=D1=81=D0=B8=D0=BC.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= 
=?UTF-8?q?=2016.=20=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB?=
 =?UTF-8?q?=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE=D0=BB=D0=B1?=
 =?UTF-8?q?=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B?=
 =?UTF-8?q?=20(#44)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The matrix is represented as a vector of integers.

**Sequential task description**
The first element of a column is taken as the initial maximum. Every other
element of the column is then compared against it, and the maximum is updated
whenever a larger value is found. At the end the function returns a vector
holding the maximum value of each column.

**MPI task description**
The matrix is partitioned according to the number of columns and the number
of processes. Each process receives a certain range of columns (a start
column and an end column are computed for it) and finds the maximum value in
each of its columns. The results are collected into a single vector with
`gatherv`.
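The column partitioning is easiest to see in isolation. Below is a minimal
Boost.MPI sketch of the scheme this description outlines (illustrative only,
not the patch's code; the file name `col_max.cpp` and the hard-coded sizes
are made up). Every rank contributes the same number of slots to `gatherv`,
so the gathered vector is trimmed back to `cols` at the root:

```cpp
// col_max.cpp -- hypothetical sketch of the per-rank column split + gatherv.
#include <boost/mpi.hpp>

#include <algorithm>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  const int cols = 10;
  const int rows = 3;

  // Row-major matrix, identical on every rank for simplicity.
  std::vector<int> m(rows * cols);
  for (int i = 0; i < rows * cols; ++i) m[i] = (i * 7) % 100;

  // ceil(cols / size) columns per rank; ranks past the end get empty ranges.
  int delta = cols / world.size() + (cols % world.size() != 0 ? 1 : 0);
  int start = delta * world.rank();
  int last = std::min(cols, delta * (world.rank() + 1));

  std::vector<int> local;
  for (int j = start; j < last; ++j) {
    int mx = m[j];
    for (int i = 1; i < rows; ++i) mx = std::max(mx, m[i * cols + j]);
    local.push_back(mx);
  }
  local.resize(delta);  // pad so every rank sends exactly delta values

  if (world.rank() == 0) {
    std::vector<int> all(delta * world.size());
    std::vector<int> sizes(world.size(), delta);
    boost::mpi::gatherv(world, local.data(), delta, all.data(), sizes, 0);
    all.resize(cols);  // drop the padding, which always sits at the tail
    for (int v : all) std::printf("%d ", v);
    std::printf("\n");
  } else {
    boost::mpi::gatherv(world, local.data(), delta, 0);
  }
  return 0;
}
```

With cols = 10 and four processes this gives delta = 3 and column ranges
[0,3), [3,6), [6,9), [9,10); only the trailing ranks pad, so truncating the
gathered vector to cols recovers the per-column maxima in order.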
---
 .../func_tests/main.cpp                       | 355 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  49 +++
 .../perf_tests/main.cpp                       | 100 +++++
 .../src/ops_mpi.cpp                           | 156 ++++++++
 .../func_tests/main.cpp                       | 146 +++++++
 .../include/ops_seq.hpp                       |  26 ++
 .../perf_tests/main.cpp                       |  96 +++++
 .../src/ops_seq.cpp                           |  63 ++++
 8 files changed, 991 insertions(+)
 create mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..3883c3ea6ae
--- /dev/null
+++ b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp
@@ -0,0 +1,355 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp"
+
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    // assign unconditionally, doing the subtraction in signed arithmetic;
+    // the original guarded on `val >= 0` and left negative draws at zero
+    vec[i] = static_cast<int>(gen() % 200) - 100;
+  }
+  return vec;
+}
+
+TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Zero_Columns) {
+  boost::mpi::communicator world;
+
+  int cols = 0;
+  int rows = 0;
+
+  std::vector<int> matrix;
+  std::vector<int> res_par(cols, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = cols * rows;
+    matrix = getRandomVector(count_size_vector);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_par.data()));
+    taskDataPar->outputs_count.emplace_back(res_par.size());
+  }
+
+  chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+  }
+}
+
+TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Empty_Matrix) {
+  boost::mpi::communicator world;
+
+  int cols = 5;
+  int rows = 5;
+
+  std::vector<int> matrix;
+  std::vector<int> res_par(cols, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_par.data()));
+    taskDataPar->outputs_count.emplace_back(res_par.size());
+  }
+
+  chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+  }
+}
+
+TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max1) {
+  boost::mpi::communicator world;
+
+  int cols = 15;
+  int rows = 5;
+
+  std::vector<int> matrix;
+  std::vector<int> res_par(cols, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = cols * rows;
+    matrix = getRandomVector(count_size_vector);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_par.data()));
+    taskDataPar->outputs_count.emplace_back(res_par.size());
+  }
+
+  chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> res_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(res_seq.size());
+
+    // Create Task
+    chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(res_seq, res_par);
+  }
+}
+
+TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max2) {
+  boost::mpi::communicator world;
+
+  int cols = 50;
+  int rows = 50;
+
+  std::vector<int> matrix;
+  std::vector<int> res_par(cols, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = cols * rows;
+    matrix = getRandomVector(count_size_vector);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(res_par.size()); + } + + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_seq.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_seq, res_par); + } +} + +TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max3) { + boost::mpi::communicator world; + + int cols = 50; + int rows = 100; + + std::vector matrix; + std::vector res_par(cols, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = cols * rows; + matrix = getRandomVector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(res_par.size()); + } + + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_seq.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_seq, res_par); + } +} + +TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max4) { + boost::mpi::communicator world; + + int cols = 70; + int rows = 50; + + std::vector matrix; + std::vector res_par(cols, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int 
count_size_vector = cols * rows; + matrix = getRandomVector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(res_par.size()); + } + + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_seq.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_seq, res_par); + } +} + +TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max5) { + boost::mpi::communicator world; + + int cols = 300; + int rows = 150; + + std::vector matrix; + std::vector res_par(cols, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = cols * rows; + matrix = getRandomVector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(res_par.size()); + } + + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(cols, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_seq.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_seq, res_par); + } +} \ No newline at end of file diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp 
b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..b8b41a0db71 --- /dev/null +++ b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace chizhov_m_max_values_by_columns_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res_{}; + int cols{}; + int rows{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::vector res_{}; + int cols{}; + int rows{}; + boost::mpi::communicator world; +}; + +} // namespace chizhov_m_max_values_by_columns_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..10362470263 --- /dev/null +++ b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp" + +TEST(chizhov_m_max_values_by_columns_matrix_perf_test, test_pipeline_run) { + int rows = 1000; + int columns = 4000; + boost::mpi::communicator world; + std::vector matrix; + std::vector max_vec_mpi(columns, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + matrix = std::vector(rows * columns, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->outputs.emplace_back(reinterpret_cast(max_vec_mpi.data())); + taskDataPar->outputs_count.emplace_back(max_vec_mpi.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < max_vec_mpi.size(); i++) { + EXPECT_EQ(1, max_vec_mpi[0]); + } + } +} + +TEST(chizhov_m_max_values_by_columns_matrix_perf_test, test_task_run) { + int rows = 1000; + int columns = 4000; + boost::mpi::communicator world; + 
+  std::vector<int> matrix;
+  std::vector<int> max_vec_mpi(columns, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    matrix = std::vector<int>(rows * columns, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(columns);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(max_vec_mpi.data()));
+    taskDataPar->outputs_count.emplace_back(max_vec_mpi.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);  // this test exercises task_run; the original repeated pipeline_run
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    for (unsigned i = 0; i < max_vec_mpi.size(); i++) {
+      EXPECT_EQ(1, max_vec_mpi[i]);  // check every column, not just element 0
+    }
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..29889a93cb1
--- /dev/null
+++ b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp
@@ -0,0 +1,156 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <random>
+#include <string>
+#include <vector>
+
+bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init vectors
+
+  cols = taskData->inputs_count[1];
+  rows = taskData->inputs_count[2];
+
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+
+  res_ = std::vector<int>(cols, 0);
+
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  if (taskData->inputs_count[1] == 0 || taskData->inputs_count[2] == 0) {
+    return false;
+  }
+  if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) {
+    return false;
+  }
+  if (taskData->inputs_count[1] != taskData->outputs_count[0]) {
+    return false;
+  }
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+
+  for (int j = 0; j < cols; j++) {
+    int maxElement = input_[j];
+    for (int i = 1; i < rows; i++) {
+      if (input_[i * cols + j] > maxElement) {
+        maxElement = input_[i * cols + j];
+      }
+    }
+    res_[j] = maxElement;
+  }
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cols; i++) {
+    reinterpret_cast<int*>(taskData->outputs[0])[i] = res_[i];
+  }
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    cols = taskData->inputs_count[1];
+    rows =
taskData->inputs_count[2]; + } + + if (world.rank() == 0) { + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + } else { + input_ = std::vector(cols * rows, 0); + } + + res_ = std::vector(cols, 0); + + return true; +} + +bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if (taskData->inputs_count[1] == 0 || taskData->inputs_count[2] == 0) { + return false; + } + if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) { + return false; + } + if (taskData->inputs_count[1] != taskData->outputs_count[0]) { + return false; + } + } + return true; +} + +bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + broadcast(world, cols, 0); + broadcast(world, rows, 0); + + if (world.rank() != 0) { + input_ = std::vector(cols * rows, 0); + } + broadcast(world, input_.data(), cols * rows, 0); + + int delta = cols / world.size(); + int extra = cols % world.size(); + if (extra != 0) { + delta += 1; + } + int startCol = delta * world.rank(); + int lastCol = std::min(cols, delta * (world.rank() + 1)); + std::vector localMax; + for (int j = startCol; j < lastCol; j++) { + int maxElem = input_[j]; + for (int i = 1; i < rows; i++) { + int coor = i * cols + j; + if (input_[coor] > maxElem) { + maxElem = input_[coor]; + } + } + localMax.push_back(maxElem); + } + localMax.resize(delta); + if (world.rank() == 0) { + std::vector globalRes(cols + delta * world.size()); + std::vector sizes(world.size(), delta); + boost::mpi::gatherv(world, localMax.data(), localMax.size(), globalRes.data(), sizes, 0); + globalRes.resize(cols); + res_ = globalRes; + } else { + boost::mpi::gatherv(world, localMax.data(), localMax.size(), 0); + } + return true; +} + +bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < cols; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res_[i]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..2b48e3d2b5b --- /dev/null +++ b/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp @@ -0,0 +1,146 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp" + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Zero_Columns) { + std::shared_ptr taskDataSeq = std::make_shared(); + int columns = 0; + std::vector matrix; + std::vector res_seq(columns, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->inputs_count.emplace_back(res_seq.size()); + + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Empty_Matrix) { + std::shared_ptr taskDataSeq = std::make_shared(); 
+ int columns = 3; + std::vector matrix; + std::vector res_seq(columns, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->inputs_count.emplace_back(res_seq.size()); + + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_3_Columns) { + int columns = 3; + + // Create data + std::vector matrix = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + std::vector max(columns, 0); + std::vector result = {7, 8, 9}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); + taskDataSeq->outputs_count.emplace_back(max.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(result, max); +} + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_4_Columns) { + int columns = 4; + + // Create data + std::vector matrix = {4, 7, 5, 3, 8, 10, 12, 4, 2, 15, 3, 27}; + std::vector max(columns, 0); + std::vector result = {8, 15, 12, 27}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); + taskDataSeq->outputs_count.emplace_back(max.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(result, max); +} + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_5_Columns) { + int columns = 5; + + // Create data + std::vector matrix = {4, 7, 5, 3, 8, 10, 12, 4, 2, 6, 2, 1, 15, 3, 27}; + std::vector max(columns, 0); + std::vector result = {10, 12, 15, 3, 27}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); + taskDataSeq->outputs_count.emplace_back(max.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + 
ASSERT_EQ(result, max); +} + +TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_6_Columns) { + int columns = 6; + + // Create data + std::vector matrix = {9, 20, 3, 4, 7, 5, 3, 8, 10, 12, 4, 2, 6, 2, 1, 15, 3, 27}; + std::vector max(columns, 0); + std::vector result = {9, 20, 10, 15, 7, 27}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); + taskDataSeq->outputs_count.emplace_back(max.size()); + + // Create Task + chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(result, max); +} \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..75bcd4a5f02 --- /dev/null +++ b/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace chizhov_m_max_values_by_columns_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int cols{}; + int rows{}; + std::vector input_; + std::vector res_; +}; + +} // namespace chizhov_m_max_values_by_columns_matrix_seq \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..856bcbf19e0 --- /dev/null +++ b/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,96 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp" + +TEST(chizhov_m_max_values_by_columns_matrix_seq, test_pipeline_run) { + int columns = 2000; + int rows = 5000; + + // Create data + std::vector matrix(rows * columns, 1); + std::vector result(columns, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataSeq->outputs_count.emplace_back(result.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = 
std::chrono::duration_cast(current_time_point - t0).count();
+    return static_cast(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  for (size_t i = 0; i < result.size(); i++) {
+    EXPECT_EQ(1, result[i]);
+  }
+}
+
+TEST(chizhov_m_max_values_by_columns_matrix_seq, test_task_run) {
+  // Create data
+  int rows = 5000;
+  int columns = 2000;
+  std::vector matrix(rows * columns, 1);
+  std::vector res(columns, 0);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns));
+  taskDataSeq->inputs_count.emplace_back((size_t)1);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data()));
+  taskDataSeq->outputs_count.emplace_back(res.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast(current_time_point - t0).count();
+    return static_cast(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  for (size_t i = 0; i < res.size(); i++) {
+    EXPECT_EQ(1, res[i]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp
new file mode 100644
index 00000000000..e5c41912960
--- /dev/null
+++ b/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp
@@ -0,0 +1,63 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp"
+
+#include
+#include
+#include
+
+bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init vectors
+  cols = (int)*taskData->inputs[1];
+  rows = (int)(taskData->inputs_count[0] / cols);
+  input_ = std::vector(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]);
+
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+
+  res_ = std::vector(cols, 0);
+
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  if ((int)*taskData->inputs[1] == 0) {
+    return false;
+  }
+  if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) {
+    return false;
+  }
+  if (*taskData->inputs[1] != taskData->outputs_count[0]) {
+    return false;
+  }
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::run() {
+  internal_order_test();
+
+  for (int j = 0; j < cols; j++) {
+    int maxElement = input_[j];
+    for (int i = 1; i < rows; i++) {
+      if (input_[i * cols + j] > maxElement) {
+        maxElement = input_[i * cols + j];
+      }
+    }
+    res_[j] = maxElement;
+  }
+
+  return true;
+}
+
+bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  for (int j = 0; j < cols; j++) {
+    reinterpret_cast(taskData->outputs[0])[j] = res_[j];
+  }
+
+  return true;
+}
\ No newline at end of file

From aa01006a7bf4f8b5067b0fce770ea26aefb270cf Mon Sep 17 00:00:00 2001
From: Arsenii Mironov <98156294+Napkin-AI@users.noreply.github.com>
Date: Mon, 4 Nov 2024 04:41:27 +0400
Subject: [PATCH 063/155] Mironov Arsenii. Task 1. Variant 3. Maximum value of
 vector elements. (#55)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Description of the sequential task:
1. A variable ```result``` is initialized to hold the maximum value of the vector; it can initially be set to the first element of the vector ```input_```.
2. The value of each element is compared with the current maximum ```result```.
3. If the value of the current element is greater, the current maximum is updated.
4. After all elements of the vector have been checked, ```result``` holds the maximum value.

### Description of the MPI task:
1. **Data partitioning** The source vector is split into fragments. Every block has the same size ```delta```, equal to the number of elements divided by the number of processes, rounded up. If the element count is not evenly divisible by the process count, the vector is artificially padded with elements equal to the minimum integer value, their number given by the remainder of the division. The vector elements are sent to the processes with the ```boost::mpi::communicator``` methods ```send``` and ```recv```; ```delta``` itself is distributed with the ```broadcast``` function.
2. **Fragment processing** Each process searches for the maximum value in its fragment of the vector, using the same algorithm as in the sequential task.
3. **Collecting the results**: The results are combined with the ```reduce``` function and the __maximum__ operator. Once the `reduce` operation completes, the variable `result` holds the maximum value over all fragments (a minimal sketch of this scheme follows below).
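Below is a minimal, self-contained sketch of the local-scan-plus-```reduce``` scheme described above. It assumes Boost.MPI; the padding and the ```send```/```recv``` distribution are collapsed into a locally constructed fragment, so `local_input` and the other names are illustrative rather than taken from the patch.

```cpp
// Sketch only: each process scans its fragment, then reduce(maximum)
// combines the per-process maxima on rank 0.
#include <boost/mpi.hpp>

#include <climits>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Stand-in for the fragment that the patch delivers via send/recv
  // after broadcasting delta.
  std::vector<int> local_input(1000, world.rank());

  // Same linear scan as in the sequential task, applied to the fragment.
  int local_res = INT_MIN;
  for (int v : local_input) {
    if (local_res < v) {
      local_res = v;
    }
  }

  // The maximum operator leaves the global answer on rank 0.
  int result = INT_MIN;
  boost::mpi::reduce(world, local_res, result, boost::mpi::maximum<int>(), 0);

  if (world.rank() == 0) {
    std::cout << "max = " << result << '\n';  // expected: world.size() - 1
  }
  return 0;
}
```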
--- .../func_tests/main.cpp | 370 ++++++++++++++++++ .../include/ops_mpi.hpp | 45 +++ .../perf_tests/main.cpp | 101 +++++ .../src/ops_mpi.cpp | 110 ++++++ .../func_tests/main.cpp | 173 ++++++++ .../include/ops_seq.hpp | 22 ++ .../perf_tests/main.cpp | 90 +++++ .../src/ops_seq.cpp | 39 ++ 8 files changed, 950 insertions(+) create mode 100644 tasks/mpi/mironov_a_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/mironov_a_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/mironov_a_max_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/mironov_a_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/mironov_a_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/mironov_a_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/mironov_a_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/mironov_a_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..662af77dc9f --- /dev/null +++ b/tasks/mpi/mironov_a_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,370 @@ +#include + +#include +#include +#include +#include + +#include "mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp" + +namespace mironov_a_max_of_vector_elements_mpi { + +std::vector get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 10000; + } + return vec; +} + +} // namespace mironov_a_max_of_vector_elements_mpi + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_1) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + // Create TaskData + const int count = 10000; + global_vec.resize(count); + for (int i = 0; i < count; ++i) { + global_vec[i] = i; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_2) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1, 0); + + 
std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 1; + global_vec.resize(count, -100000000); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_3) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 1000000; + const int start = -7890000; + global_vec.resize(count); + for (int i = 0, j = start; i < count; ++i, j += 9) { + global_vec[i] = j; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_4) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 1000000; + const int start = -7890000; + global_vec.resize(count); + for (int i = count - 1, j = start; i >= 0; --i, j += 4) { + global_vec[i] = 
j; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_5) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 100; + global_vec.resize(count, 0); + for (int i = 1; i < 100; i += 2) { + global_vec[i] = INT_MAX; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_6) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 100; + global_vec = mironov_a_max_of_vector_elements_mpi::get_random_vector(count); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + 
taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Test_Max_7) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 80640; + global_vec = mironov_a_max_of_vector_elements_mpi::get_random_vector(count); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + mironov_a_max_of_vector_elements_mpi::MaxVectorSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, Wrong_Input) { + boost::mpi::communicator world; + // Create TaskData + std::vector global_vec; + std::vector global_max(1); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + mironov_a_max_of_vector_elements_mpi::MaxVectorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} diff --git a/tasks/mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 
00000000000..d9abc322e15 --- /dev/null +++ b/tasks/mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace mironov_a_max_of_vector_elements_mpi { + +class MaxVectorSequential : public ppc::core::Task { + public: + explicit MaxVectorSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int result_{}; +}; + +class MaxVectorMPI : public ppc::core::Task { + public: + explicit MaxVectorMPI(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int result_{}; + unsigned int delta = 0u; + boost::mpi::communicator world; +}; + +} // namespace mironov_a_max_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/mironov_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/mironov_a_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..c36779c0e7c --- /dev/null +++ b/tasks/mpi/mironov_a_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,101 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp" + +TEST(mironov_a_max_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int gold; + if (world.rank() == 0) { + const int count = 200000000; + const int start = -789000000; + gold = start + 5 * (count - 1); + global_vec.resize(count); + for (int i = 0, j = start; i < count; ++i, j += 5) { + global_vec[i] = j; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(gold, global_max[0]); + } +} + +TEST(mironov_a_max_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int gold; + if (world.rank() == 0) { + const int count = 200000000; + const int start = -789000000; + gold = start + 5 * (count - 1); + global_vec.resize(count); + for (int i = 0, j = start; i < count; ++i, j += 5) { + 
global_vec[i] = j; + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(gold, global_max[0]); + } +} diff --git a/tasks/mpi/mironov_a_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/mironov_a_max_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..34640ffc4c8 --- /dev/null +++ b/tasks/mpi/mironov_a_max_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,110 @@ +#include "mpi/mironov_a_max_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + int* it = reinterpret_cast(taskData->inputs[0]); + std::copy(it, it + taskData->inputs_count[0], input_.begin()); + result_ = input_[0]; + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs_count[0] > 0) && (taskData->outputs_count[0] == 1); +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorSequential::run() { + internal_order_test(); + + result_ = input_[0]; + for (size_t it = 1; it < input_.size(); ++it) { + if (result_ < input_[it]) { + result_ = input_[it]; + } + } + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorMPI::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + if (taskData->inputs_count[0] % world.size() != 0u) { + delta++; + } + + // Init vector + int* it = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(static_cast(delta) * world.size(), INT_MIN); + std::copy(it, it + taskData->inputs_count[0], input_.begin()); + + // Init value for output + result_ = input_[0]; + } + + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorMPI::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output & input + return (taskData->inputs_count[0] > 0) && (taskData->outputs_count[0] == 1); + } + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorMPI::run() { + internal_order_test(); + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // probably better to use isend + for (int proc 
= 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + } + + local_input_ = std::vector(delta, INT_MIN); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + // Init value for result + int local_res = local_input_[0]; + for (size_t it = 1; it < local_input_.size(); ++it) { + if (local_res < local_input_[it]) { + local_res = local_input_[it]; + } + } + reduce(world, local_res, result_, boost::mpi::maximum(), 0); + return true; +} + +bool mironov_a_max_of_vector_elements_mpi::MaxVectorMPI::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result_; + } + return true; +} diff --git a/tasks/seq/mironov_a_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/mironov_a_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..df4423bed23 --- /dev/null +++ b/tasks/seq/mironov_a_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,173 @@ +#include + +#include + +#include "seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp" + +TEST(mironov_a_max_of_vector_elements_seq, Test_Max_1) { + const int count = 10000; + const int gold = 9999; + + // Create data + std::vector in(count); + std::vector out(1); + for (int i = 0; i < count; ++i) { + in[i] = i; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), true); + MaxVectorSequential.pre_processing(); + MaxVectorSequential.run(); + MaxVectorSequential.post_processing(); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, Test_Max_2) { + const int count = 1; + const int gold = -100000000; + + // Create data + std::vector in(count, -100000000); + std::vector out(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), true); + MaxVectorSequential.pre_processing(); + MaxVectorSequential.run(); + MaxVectorSequential.post_processing(); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, Test_Max_3) { + constexpr int count = 10000000; + constexpr int start = -7890000; + constexpr int gold = start + 9 * (count - 1); + + // Create data + std::vector in(count); + std::vector out(1); + for (int i = 0, j = start; i < count; ++i, j += 9) { + in[i] = j; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + 
mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), true); + MaxVectorSequential.pre_processing(); + MaxVectorSequential.run(); + MaxVectorSequential.post_processing(); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, Test_Max_4) { + constexpr int count = 10000000; + constexpr int start = -7890000; + constexpr int gold = start + 4 * (count - 1); + + // Create data + std::vector in(count); + std::vector out(1); + for (int i = count - 1, j = start; i >= 0; --i, j += 4) { + in[i] = j; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), true); + MaxVectorSequential.pre_processing(); + MaxVectorSequential.run(); + MaxVectorSequential.post_processing(); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, Test_Max_5) { + const int count = 100; + const int gold = INT_MAX; + + // Create data + std::vector in(count, 0); + std::vector out(1); + for (int i = 1; i < 100; i += 2) { + in[i] = INT_MAX; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), true); + MaxVectorSequential.pre_processing(); + MaxVectorSequential.run(); + MaxVectorSequential.post_processing(); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, Wrong_Input_1) { + // Create data + std::vector in; + std::vector out(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), false); +} + +TEST(mironov_a_max_of_vector_elements_seq, Wrong_Input_2) { + // Create data + std::vector in(3, 5); + std::vector out; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + mironov_a_max_of_vector_elements_seq::MaxVectorSequential MaxVectorSequential(taskDataSeq); + ASSERT_EQ(MaxVectorSequential.validation(), false); +} diff --git a/tasks/seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..ddb07551827 --- /dev/null +++ 
b/tasks/seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace mironov_a_max_of_vector_elements_seq { + +class MaxVectorSequential : public ppc::core::Task { + public: + explicit MaxVectorSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int result_{}; +}; + +} // namespace mironov_a_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/mironov_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/mironov_a_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..75c168c971f --- /dev/null +++ b/tasks/seq/mironov_a_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,90 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp" + +TEST(mironov_a_max_of_vector_elements_seq, test_pipeline_run) { + constexpr int count = 20000000; + constexpr int start = -789000000; + constexpr int gold = start + 5 * (count - 1); + + // Create data + std::vector in(count); + std::vector out(1); + for (int i = 0, j = start; i < count; ++i, j += 5) { + in[i] = j; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(gold, out[0]); +} + +TEST(mironov_a_max_of_vector_elements_seq, test_task_run) { + constexpr int count = 20000000; + constexpr int start = -789000000; + constexpr int gold = start + 5 * (count - 1); + + // Create data + std::vector in(count); + std::vector out(1); + for (int i = 0, j = start; i < count; ++i, j += 5) { + in[i] = j; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; 
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(gold, out[0]);
+}
diff --git a/tasks/seq/mironov_a_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/mironov_a_max_of_vector_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..dccfd497357
--- /dev/null
+++ b/tasks/seq/mironov_a_max_of_vector_elements/src/ops_seq.cpp
@@ -0,0 +1,39 @@
+#include "seq/mironov_a_max_of_vector_elements/include/ops_seq.hpp"
+
+#include
+
+using namespace std::chrono_literals;
+
+bool mironov_a_max_of_vector_elements_seq::MaxVectorSequential::pre_processing() {
+  internal_order_test();
+
+  // Init value for input and output
+  input_ = std::vector(taskData->inputs_count[0]);
+  int* it = reinterpret_cast(taskData->inputs[0]);
+  std::copy(it, it + taskData->inputs_count[0], input_.begin());
+  result_ = input_[0];
+  return true;
+}
+
+bool mironov_a_max_of_vector_elements_seq::MaxVectorSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->inputs_count[0] > 0) && (taskData->outputs_count[0] == 1);
+}
+
+bool mironov_a_max_of_vector_elements_seq::MaxVectorSequential::run() {
+  internal_order_test();
+  result_ = input_[0];
+  for (size_t it = 1; it < input_.size(); ++it) {
+    if (result_ < input_[it]) {
+      result_ = input_[it];
+    }
+  }
+  return true;
+}
+
+bool mironov_a_max_of_vector_elements_seq::MaxVectorSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast(taskData->outputs[0])[0] = result_;
+  return true;
+}
From 1ebe5eab854b2cc0a7f0b7e417be7d3f2cb02b49 Mon Sep 17 00:00:00 2001
From: vitaliyvoroshilov <73603291+vitaliyvoroshilov@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:42:28 +0300
Subject: [PATCH 064/155] Voroshilov Vitaliy. Task 1. Variant 22. Counting the
 number of alphabetic characters in a string. (#56)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The algorithm for counting the number of alphabetic characters in a string is a full scan of the given string that builds the result as a single integer.
- In the sequential solution the program simply walks the whole string (more precisely, a vector), compares the ASCII code of the current character with the ASCII codes of the English letters (lowercase and uppercase) and, if a match is found, increments a counter variable. After the whole vector has been traversed, the counter variable contains the desired result.
- In the parallel solution the root process distributes the character vector in equal parts to itself and to the other processes of the communicator. If the vector cannot be divided into equal parts, all processes receive equal parts and the root process additionally takes the remaining "incomplete" piece. Each process then builds its local result with the same full scan of its fragment of the original vector, comparing ASCII codes. Finally, the root process collects the global result as the sum of the local results of all processes; this global result is the desired count (a minimal sketch of this scheme follows below).
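A minimal, self-contained sketch of the counting scheme above, assuming Boost.MPI. The classification uses ```std::isalpha``` (as the source files of this patch do) rather than explicit ASCII comparisons, and the block distribution is simplified to a hard-coded local fragment, so `fragment` and the other names are illustrative.

```cpp
// Sketch only: each process counts alphabetic characters in its fragment,
// then reduce(std::plus) sums the per-process counters on rank 0.
#include <boost/mpi.hpp>

#include <cctype>
#include <functional>
#include <iostream>
#include <string>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Stand-in for the slice of the input vector sent out by the root.
  std::string fragment = "ab1C+2dE";  // 5 alphabetic characters

  // Full scan of the local fragment, as in the sequential solution.
  int local_res = 0;
  for (char c : fragment) {
    if (std::isalpha(static_cast<unsigned char>(c)) != 0) {
      local_res++;
    }
  }

  // The root collects the global result as the sum of the local counts.
  int res = 0;
  boost::mpi::reduce(world, local_res, res, std::plus<int>(), 0);

  if (world.rank() == 0) {
    // expected: 5 * world.size() with the identical stand-in fragments
    std::cout << "alphabetic characters: " << res << '\n';
  }
  return 0;
}
```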
---
 .../func_tests/main.cpp | 303 ++++++++++++++++++
 .../include/ops_mpi.hpp | 44 +++
 .../perf_tests/main.cpp | 137 ++++++++
 .../src/ops_mpi.cpp | 106 ++++++
 .../func_tests/main.cpp | 177 ++++++++
 .../include/ops_seq.hpp | 23 ++
 .../perf_tests/main.cpp | 129 ++++++++
 .../src/ops_seq.cpp | 37 +++
 8 files changed, 956 insertions(+)
 create mode 100644 tasks/mpi/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
 create mode 100644 tasks/mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp
 create mode 100644 tasks/mpi/voroshilov_v_num_of_alphabetic_chars/src/ops_mpi.cpp
 create mode 100644 tasks/seq/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
 create mode 100644 tasks/seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp
 create mode 100644 tasks/seq/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp
 create mode 100644 tasks/seq/voroshilov_v_num_of_alphabetic_chars/src/ops_seq.cpp
diff --git a/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
new file mode 100644
index 00000000000..c0de0f4c76a
--- /dev/null
+++ b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
@@ -0,0 +1,303 @@
+#include
+
+#include
+#include
+#include
+#include
+
+#include "mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp"
+
+std::vector genVecWithFixedAlphabeticsCount(int alphCount, size_t size) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector vector(size);
+  int curCount = 0;
+
+  std::string charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#$%&(){}[]*+-/";
+  int charset_alphabet_size = 52;
+
+  // First fill with a fully random alphabetic count:
+  for (size_t i = 0; i < vector.size(); i++) {
+    int number = gen() % charset.length();
+    vector[i] = charset[number];
+    if (std::isalpha(vector[i]) != 0) {
+      curCount++;
+    }
+  }
+
+  if (curCount < alphCount) {
+    // Change non-alphabetics to alphabetics to complete missing quantity
+    for (size_t i = 0; curCount < alphCount; i++) {
+      if (std::isalpha(vector[i]) == 0) {
+        int number = gen() % charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount++;
+      }
+    }
+  } else {
+    // Change alphabetics to non-alphabetics if there is an oversupply
+    for (size_t i = 0; curCount > alphCount; i++) {
+      if (std::isalpha(vector[i]) != 0) {
+        int number = gen() % (charset.length() - charset_alphabet_size) + charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount--;
+      }
+    }
+  }
+
+  return vector;
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_mpi_func, test_without_alphabetic_chars_mpi) {
+  std::string str = "123456789-+*/=<>";
+  int initial_num = 0;
+  int expected_num = 0;
+
+  boost::mpi::communicator world;
+  std::vector global_vec(str.length());
+  std::vector global_num(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr taskDataPar = std::make_shared();
+  if (world.rank() == 0) {
+    std::copy(str.begin(), str.end(), global_vec.begin());
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast(global_num.data()));
+    taskDataPar->outputs_count.emplace_back(global_num.size());
+  }
+
+
voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel alphabetCharsTaskParallel(taskDataPar); + ASSERT_EQ(alphabetCharsTaskParallel.validation(), true); + alphabetCharsTaskParallel.pre_processing(); + alphabetCharsTaskParallel.run(); + alphabetCharsTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Check if global_num is right + ASSERT_EQ(expected_num, global_num[0]); + + // Create data + std::vector reference_sum(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq); + ASSERT_EQ(alphabetCharsTaskSequential.validation(), true); + alphabetCharsTaskSequential.pre_processing(); + alphabetCharsTaskSequential.run(); + alphabetCharsTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_num[0]); + } +} + +TEST(voroshilov_v_num_of_alphabetic_chars_mpi_func, test_with_lowercase_alphabetic_chars_mpi) { + std::string str = "123456789-+*/=<>aaabbcxyyzzz"; + int initial_num = 0; + int expected_num = 12; + + boost::mpi::communicator world; + std::vector global_vec(str.length()); + std::vector global_num(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::copy(str.begin(), str.end(), global_vec.begin()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_num.data())); + taskDataPar->outputs_count.emplace_back(global_num.size()); + } + + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel alphabetCharsTaskParallel(taskDataPar); + ASSERT_EQ(alphabetCharsTaskParallel.validation(), true); + alphabetCharsTaskParallel.pre_processing(); + alphabetCharsTaskParallel.run(); + alphabetCharsTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Check if global_num is right + ASSERT_EQ(expected_num, global_num[0]); + + // Create data + std::vector reference_sum(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq); + ASSERT_EQ(alphabetCharsTaskSequential.validation(), true); + alphabetCharsTaskSequential.pre_processing(); + alphabetCharsTaskSequential.run(); + alphabetCharsTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_num[0]); + } +} + +TEST(voroshilov_v_num_of_alphabetic_chars_mpi_func, test_with_uppercase_alphabetic_chars_mpi) { + std::string str = "123456789-+*/=<>AAABBCXYYZZZ"; + int initial_num = 0; + int expected_num = 12; + + boost::mpi::communicator world; + std::vector global_vec(str.length()); + std::vector global_num(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if 
(world.rank() == 0) { + std::copy(str.begin(), str.end(), global_vec.begin()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_num.data())); + taskDataPar->outputs_count.emplace_back(global_num.size()); + } + + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel alphabetCharsTaskParallel(taskDataPar); + ASSERT_EQ(alphabetCharsTaskParallel.validation(), true); + alphabetCharsTaskParallel.pre_processing(); + alphabetCharsTaskParallel.run(); + alphabetCharsTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Check if global_num is right + ASSERT_EQ(expected_num, global_num[0]); + + // Create data + std::vector reference_sum(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq); + ASSERT_EQ(alphabetCharsTaskSequential.validation(), true); + alphabetCharsTaskSequential.pre_processing(); + alphabetCharsTaskSequential.run(); + alphabetCharsTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_num[0]); + } +} + +TEST(voroshilov_v_num_of_alphabetic_chars_mpi_func, test_with_anycase_alphabetic_chars_mpi) { + std::string str = "123456789-+*/=<>aaabbcxyyzzzAAABBCXYYZZZ"; + int initial_num = 0; + int expected_num = 24; + + boost::mpi::communicator world; + std::vector global_vec(str.length()); + std::vector global_num(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::copy(str.begin(), str.end(), global_vec.begin()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_num.data())); + taskDataPar->outputs_count.emplace_back(global_num.size()); + } + + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel alphabetCharsTaskParallel(taskDataPar); + ASSERT_EQ(alphabetCharsTaskParallel.validation(), true); + alphabetCharsTaskParallel.pre_processing(); + alphabetCharsTaskParallel.run(); + alphabetCharsTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Check if global_num is right + ASSERT_EQ(expected_num, global_num[0]); + + // Create data + std::vector reference_sum(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq); + ASSERT_EQ(alphabetCharsTaskSequential.validation(), true); + alphabetCharsTaskSequential.pre_processing(); + alphabetCharsTaskSequential.run(); + alphabetCharsTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_num[0]); + } +} + 
+TEST(voroshilov_v_num_of_alphabetic_chars_mpi_func, test_with_random_generated_vector_mpi) { + int initial_num = 0; + int expected_num = 50; + size_t vec_size = 100; + + boost::mpi::communicator world; + std::vector global_vec(vec_size); + std::vector global_num(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = genVecWithFixedAlphabeticsCount(expected_num, vec_size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_num.data())); + taskDataPar->outputs_count.emplace_back(global_num.size()); + } + + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel alphabetCharsTaskParallel(taskDataPar); + ASSERT_EQ(alphabetCharsTaskParallel.validation(), true); + alphabetCharsTaskParallel.pre_processing(); + alphabetCharsTaskParallel.run(); + alphabetCharsTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Check if global_num is right + ASSERT_EQ(expected_num, global_num[0]); + + // Create data + std::vector reference_sum(1, initial_num); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq); + ASSERT_EQ(alphabetCharsTaskSequential.validation(), true); + alphabetCharsTaskSequential.pre_processing(); + alphabetCharsTaskSequential.run(); + alphabetCharsTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_num[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp new file mode 100644 index 00000000000..80fa2a9e6cb --- /dev/null +++ b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace voroshilov_v_num_of_alphabetic_chars_mpi { + +class AlphabetCharsTaskSequential : public ppc::core::Task { + public: + explicit AlphabetCharsTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_; +}; + +class AlphabetCharsTaskParallel : public ppc::core::Task { + public: + explicit AlphabetCharsTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector local_input_; + int res_; + boost::mpi::communicator world; +}; + +} // namespace voroshilov_v_num_of_alphabetic_chars_mpi \ No newline at end of file diff --git a/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp new file mode 100644 index 00000000000..8dceb362eb1 --- /dev/null +++ 
b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp
@@ -0,0 +1,137 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <random>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp"
+
+std::vector<char> genVecWithFixedAlphabeticsCount(int alphCount, size_t size) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<char> vector(size);
+  int curCount = 0;
+
+  std::string charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#$%&(){}[]*+-/";
+  int charset_alphabet_size = 52;
+
+  // Generate with absolutely random alphabetics count:
+  for (size_t i = 0; i < vector.size(); i++) {
+    int number = gen() % charset.length();
+    vector[i] = charset[number];
+    if (std::isalpha(vector[i]) != 0) {
+      curCount++;
+    }
+  }
+
+  if (curCount < alphCount) {
+    // Change non-alphabetics to alphabetics to complete missing quantity
+    for (size_t i = 0; curCount < alphCount; i++) {
+      if (std::isalpha(vector[i]) == 0) {
+        int number = gen() % charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount++;
+      }
+    }
+  } else {
+    // Change alphabetics to non-alphabetics if there is an oversupply
+    for (size_t i = 0; curCount > alphCount; i++) {
+      if (std::isalpha(vector[i]) != 0) {
+        int number = gen() % (charset.length() - charset_alphabet_size) + charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount--;
+      }
+    }
+  }
+
+  return vector;
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_mpi_perf, test_pipeline_run_mpi) {
+  int initial_num = 0;
+  int expected_num = 5000;
+  size_t vec_size = 10000;
+
+  boost::mpi::communicator world;
+  std::vector<char> global_vec(vec_size);
+  std::vector<int> global_num(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    global_vec = genVecWithFixedAlphabeticsCount(expected_num, vec_size);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_num.data()));
+    taskDataPar->outputs_count.emplace_back(global_num.size());
+  }
+
+  auto alphabetCharsTaskParallel =
+      std::make_shared<voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel>(taskDataPar);
+  ASSERT_EQ(alphabetCharsTaskParallel->validation(), true);
+  alphabetCharsTaskParallel->pre_processing();
+  alphabetCharsTaskParallel->run();
+  alphabetCharsTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(alphabetCharsTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(expected_num, global_num[0]);
+  }
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_mpi_perf, test_task_run_mpi) {
+  int initial_num = 0;
+  int expected_num = 5000;
+  size_t vec_size = 10000;
+
+  boost::mpi::communicator world;
+  std::vector<char> global_vec(vec_size);
+  std::vector<int> global_num(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    global_vec = genVecWithFixedAlphabeticsCount(expected_num, vec_size);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_num.data()));
+    taskDataPar->outputs_count.emplace_back(global_num.size());
+  }
+
+  auto alphabetCharsTaskParallel =
+      std::make_shared<voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel>(taskDataPar);
+  ASSERT_EQ(alphabetCharsTaskParallel->validation(), true);
+  alphabetCharsTaskParallel->pre_processing();
+  alphabetCharsTaskParallel->run();
+  alphabetCharsTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(alphabetCharsTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(expected_num, global_num[0]);
+  }
+}
diff --git a/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/src/ops_mpi.cpp b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/src/ops_mpi.cpp
new file mode 100644
index 00000000000..e9a4db9e1ee
--- /dev/null
+++ b/tasks/mpi/voroshilov_v_num_of_alphabetic_chars/src/ops_mpi.cpp
@@ -0,0 +1,106 @@
+#include "mpi/voroshilov_v_num_of_alphabetic_chars/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <thread>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of input and output
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector<char>(taskData->inputs_count[0]);
+  char* ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+  std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin());
+  res_ = 0;
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_.size(); i++) {
+    if (std::isalpha(input_[i]) != 0) {  // Check if it is alphabetic character
+      res_++;
+    }
+  }
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int*>(taskData->outputs[0]) = res_;
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of input and output
+    return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel::pre_processing() {
+  internal_order_test();
+  // Init value for output
+  res_ = 0;
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel::run() {
+  internal_order_test();
+
+  std::vector<char> input_;
+  size_t part = 0;
+  size_t remainder = 0;
+
+  if (world.rank() == 0) {
+    part = taskData->inputs_count[0] / world.size();
+    remainder = taskData->inputs_count[0] % world.size();
+  }
+  boost::mpi::broadcast(world, part, 0);
+  boost::mpi::broadcast(world, remainder, 0);
+
+  if (world.rank() == 0) {
+    // Init vectors
+    input_ = std::vector<char>(taskData->inputs_count[0]);
+    char* ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+    std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin());
+    for (int proc = 1; proc < world.size(); proc++) {
+      world.send(proc, 0, input_.data() + remainder + proc * part, part);
+    }
+  }
+
+  local_input_ = std::vector<char>(part);
+  if (world.rank() == 0) {
+    local_input_ = std::vector<char>(input_.begin(), input_.begin() + remainder + part);
+  } else {
+    world.recv(0, 0, local_input_.data(), part);
+  }
+
+  int local_res = 0;
+  for (size_t i = 0; i < local_input_.size(); i++) {
+    if (std::isalpha(local_input_[i]) != 0) {  // Check if it is alphabetic character
+      local_res++;
+    }
+  }
+  boost::mpi::reduce(world, local_res, res_, std::plus<int>(), 0);
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_mpi::AlphabetCharsTaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<int*>(taskData->outputs[0]) = res_;
+  }
+  return true;
+}
diff --git a/tasks/seq/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
new file mode 100644
index 00000000000..9c966e2eb97
--- /dev/null
+++ b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/func_tests/main.cpp
@@ -0,0 +1,177 @@
+#include <gtest/gtest.h>
+
+#include <random>
+#include <string>
+#include <vector>
+
+#include "seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp"
+
+std::vector<char> genVecWithFixedAlphabeticsCount(int alphCount, size_t size) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<char> vector(size);
+  int curCount = 0;
+
+  std::string charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#$%&(){}[]*+-/";
+  int charset_alphabet_size = 52;
+
+  // Generate with absolutely random alphabetics count:
+  for (size_t i = 0; i < vector.size(); i++) {
+    int number = gen() % charset.length();
+    vector[i] = charset[number];
+    if (std::isalpha(vector[i]) != 0) {
+      curCount++;
+    }
+  }
+
+  if (curCount < alphCount) {
+    // Change non-alphabetics to alphabetics to complete missing quantity
+    for (size_t i = 0; curCount < alphCount; i++) {
+      if (std::isalpha(vector[i]) == 0) {
+        int number = gen() % charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount++;
+      }
+    }
+  } else {
+    // Change alphabetics to non-alphabetics if there is an oversupply
+    for (size_t i = 0; curCount > alphCount; i++) {
+      if (std::isalpha(vector[i]) != 0) {
+        int number = gen() % (charset.length() - charset_alphabet_size) + charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount--;
+      }
+    }
+  }
+
+  return vector;
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_func, test_without_alphabetic_chars_seq) {
+  std::string str = "123456789-+*/=<>";
+  int initial_num = 0;
+  int expected_num = 0;
+
+  // Create data
+  std::vector<char> in(str.length());
+  std::copy(str.begin(), str.end(), in.begin());
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq);
+  ASSERT_EQ(alphabetCharsTaskSequential.validation(), true);
+  alphabetCharsTaskSequential.pre_processing();
+  alphabetCharsTaskSequential.run();
+  alphabetCharsTaskSequential.post_processing();
+  ASSERT_EQ(expected_num, out[0]);
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_func, test_with_lowercase_alphabetic_chars_seq) {
+  std::string str = "123456789-+*/=<>aaabbcxyyzzz";
+  int initial_num = 0;
+  int expected_num = 12;
+
+  // Create data
+  std::vector<char> in(str.length());
+  std::copy(str.begin(), str.end(), in.begin());
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq);
+  ASSERT_EQ(alphabetCharsTaskSequential.validation(), true);
+  alphabetCharsTaskSequential.pre_processing();
+  alphabetCharsTaskSequential.run();
+  alphabetCharsTaskSequential.post_processing();
+  ASSERT_EQ(expected_num, out[0]);
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_func, test_with_uppercase_alphabetic_chars_seq) {
+  std::string str = "123456789-+*/=<>AAABBCXYYZZZ";
+  int initial_num = 0;
+  int expected_num = 12;
+
+  // Create data
+  std::vector<char> in(str.length());
+  std::copy(str.begin(), str.end(), in.begin());
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq);
+  ASSERT_EQ(alphabetCharsTaskSequential.validation(), true);
+  alphabetCharsTaskSequential.pre_processing();
+  alphabetCharsTaskSequential.run();
+  alphabetCharsTaskSequential.post_processing();
+  ASSERT_EQ(expected_num, out[0]);
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_func, test_with_anycase_alphabetic_chars_seq) {
+  std::string str = "123456789-+*/=<>aaabbcxyyzzzAAABBCXYYZZZ";
+  int initial_num = 0;
+  int expected_num = 24;
+
+  // Create data
+  std::vector<char> in(str.length());
+  std::copy(str.begin(), str.end(), in.begin());
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq);
+  ASSERT_EQ(alphabetCharsTaskSequential.validation(), true);
+  alphabetCharsTaskSequential.pre_processing();
+  alphabetCharsTaskSequential.run();
+  alphabetCharsTaskSequential.post_processing();
+  ASSERT_EQ(expected_num, out[0]);
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_func, test_with_random_generated_vector_seq) {
+  int initial_num = 0;
+  int expected_num = 50;
+  size_t vec_size = 100;
+
+  // Create data
+  std::vector<char> in = genVecWithFixedAlphabeticsCount(expected_num, vec_size);
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential alphabetCharsTaskSequential(taskDataSeq);
+  ASSERT_EQ(alphabetCharsTaskSequential.validation(), true);
+  alphabetCharsTaskSequential.pre_processing();
+  alphabetCharsTaskSequential.run();
+  alphabetCharsTaskSequential.post_processing();
+  ASSERT_EQ(expected_num, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp
new file mode 100644
index 00000000000..623d76ca730
--- /dev/null
+++ b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace voroshilov_v_num_of_alphabetic_chars_seq {
+
+class AlphabetCharsTaskSequential : public ppc::core::Task {
+ public:
+  explicit AlphabetCharsTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool validation() override;
+  bool pre_processing() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<char> input_;
+  int res_;
+};
+
+}  // namespace voroshilov_v_num_of_alphabetic_chars_seq
\ No newline at end of file
diff --git a/tasks/seq/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp
new file mode 100644
index 00000000000..96a79664f80
--- /dev/null
+++ b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/perf_tests/main.cpp
@@ -0,0 +1,129 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <random>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp"
+
+std::vector<char> genVecWithFixedAlphabeticsCount(int alphCount, size_t size) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<char> vector(size);
+  int curCount = 0;
+
+  std::string charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#$%&(){}[]*+-/";
+  int charset_alphabet_size = 52;
+
+  // Generate with absolutely random alphabetics count:
+  for (size_t i = 0; i < vector.size(); i++) {
+    int number = gen() % charset.length();
+    vector[i] = charset[number];
+    if (std::isalpha(vector[i]) != 0) {
+      curCount++;
+    }
+  }
+
+  if (curCount < alphCount) {
+    // Change non-alphabetics to alphabetics to complete missing quantity
+    for (size_t i = 0; curCount < alphCount; i++) {
+      if (std::isalpha(vector[i]) == 0) {
+        int number = gen() % charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount++;
+      }
+    }
+  } else {
+    // Change alphabetics to non-alphabetics if there is an oversupply
+    for (size_t i = 0; curCount > alphCount; i++) {
+      if (std::isalpha(vector[i]) != 0) {
+        int number = gen() % (charset.length() - charset_alphabet_size) + charset_alphabet_size;
+        vector[i] = charset[number];
+        curCount--;
+      }
+    }
+  }
+
+  return vector;
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_perf, test_pipeline_run_seq) {
+  int initial_num = 0;
+  int expected_num = 5000;
+  size_t vec_size = 10000;
+
+  // Create data
+  std::vector<char> in = genVecWithFixedAlphabeticsCount(expected_num, vec_size);
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto alphabetCharsTaskSequential =
+      std::make_shared<voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(alphabetCharsTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(expected_num, out[0]);
+}
+
+TEST(voroshilov_v_num_of_alphabetic_chars_seq_perf, test_task_run_seq) {
+  int initial_num = 0;
+  int expected_num = 5000;
+  size_t vec_size = 10000;
+
+  // Create data
+  std::vector<char> in = genVecWithFixedAlphabeticsCount(expected_num, vec_size);
+  std::vector<int> out(1, initial_num);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto alphabetCharsTaskSequential =
+      std::make_shared<voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(alphabetCharsTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(expected_num, out[0]);
+}
diff --git a/tasks/seq/voroshilov_v_num_of_alphabetic_chars/src/ops_seq.cpp b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/src/ops_seq.cpp
new file mode 100644
index 00000000000..a6369c3a4dd
--- /dev/null
+++ b/tasks/seq/voroshilov_v_num_of_alphabetic_chars/src/ops_seq.cpp
@@ -0,0 +1,37 @@
+#include "seq/voroshilov_v_num_of_alphabetic_chars/include/ops_seq.hpp"
+
+#include <cctype>
+
+using namespace std::chrono_literals;
+
+bool voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of input and output
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector<char>(taskData->inputs_count[0]);
+  char* ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+  std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin());
+  res_ = 0;
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_.size(); i++) {
+    if (std::isalpha(input_[i]) != 0) {  // Check if it is alphabetic character
+      res_++;
+    }
+  }
+  return true;
+}
+
+bool voroshilov_v_num_of_alphabetic_chars_seq::AlphabetCharsTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int*>(taskData->outputs[0]) = res_;
+  return true;
+}

From e60a2afa3de15a33c1bc71b03aafef352c337c09 Mon Sep 17 00:00:00 2001
From: MoiseevArt <112872776+MoiseevArt@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:43:06 +0300
Subject: =?UTF-8?q?=D0=9C=D0=BE=D0=B8=D1=81=D0=B5=D0=B5?=
 =?UTF-8?q?=D0=B2=20=D0=90=D1=80=D1=82=D1=91=D0=BC.=20=D0=97=D0=B0=D0=B4?=
 =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?=
 =?UTF-8?q?=D1=82=208.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5?=
 =?UTF-8?q?=20=D0=BE=D1=82=D0=BB=D0=B8=D1=87=D0=B0=D1=8E=D1=89=D0=B8=D1=85?=
 =?UTF-8?q?=D1=81=D1=8F=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8?=
 =?UTF-8?q?=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?=
 =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#63)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task:
First, a vector input_ is built from the input data, and all further
computation is performed on it. If the vector contains fewer than two
elements, an error is returned. The algorithm walks the vector starting
from the second element and computes the difference for each pair of
neighboring elements. If the difference exceeds the current maximum,
the maximum and the indices of the pair are updated. The results, the
pair of values and their indices, are written to taskData->outputs.

Description of the MPI task:
The input data is split between processes using scatter. The root
process loads the entire vector and partitions it, taking the remainder
of dividing by the number of processes into account, so that the data
is distributed as evenly as possible. Each process receives its own
subarray and finds, locally, the pair of neighboring elements with the
largest difference in values. Once every chunk has been processed, a
reduce operation collects and compares the local maxima across all
processes to find the globally most different pair of neighboring
elements. The root process checks that the indices lie within the
bounds of the original vector and writes the result to
taskData->outputs.
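For illustration only (not part of the patch below): a minimal,
self-contained sketch of the reduce step just described, using
boost::mpi with a serializable candidate struct and a custom
comparison. The names (Candidate, local, global) are hypothetical; the
real task additionally templates the element type and distributes the
data with scatterv.

#include <boost/mpi.hpp>
#include <cstdint>
#include <iostream>

// Hypothetical candidate type: the largest local difference and where it was found.
struct Candidate {
  int diff;
  int64_t l_index;

  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) {
    ar & diff;
    ar & l_index;
  }
};

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Stand-in for the candidate each rank would find in its own chunk.
  Candidate local{world.rank() * 10, world.rank()};
  Candidate global{};

  // Custom reduction: keep the larger difference, break ties by the smaller index.
  boost::mpi::reduce(
      world, local, global,
      [](const Candidate& a, const Candidate& b) {
        return (a.diff > b.diff || (a.diff == b.diff && a.l_index < b.l_index)) ? a : b;
      },
      0);

  if (world.rank() == 0) {
    std::cout << "max diff " << global.diff << " at index " << global.l_index << '\n';
  }
  return 0;
}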
--- .../func_tests/main.cpp | 306 ++++++++++++++++++ .../include/ops_mpi.hpp | 141 ++++++++ .../perf_tests/main.cpp | 89 +++++ .../src/ops_mpi.cpp | 0 .../func_tests/main.cpp | 183 +++++++++++ .../include/ops_seq.hpp | 62 ++++ .../perf_tests/main.cpp | 86 +++++ .../src/ops_seq.cpp | 0 8 files changed, 867 insertions(+) create mode 100644 tasks/mpi/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/moiseev_a_most_different_neighbor_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp create mode 100644 tasks/seq/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/moiseev_a_most_different_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..0df55e7135e --- /dev/null +++ b/tasks/mpi/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,306 @@ +#include + +#include "mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp" +#include "seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp" + +template +std::vector generateRandomVector(int size) { + std::vector vec(size); + for (int i = 0; i < size; ++i) { + vec[i] = static_cast(rand() % 100); + } + return vec; +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestVectorInt100) { + MPI_Comm comm = MPI_COMM_WORLD; + int rank; + MPI_Comm_rank(comm, &rank); + + std::vector global_vec; + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (rank == 0) { + const int vector_size = 100; + global_vec = generateRandomVector(vector_size); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (rank == 0) { + std::vector reference_values_out(2); + std::vector reference_indices_out(2); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_values_out.data())); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_values_out.size()); + taskDataSeq->outputs_count.emplace_back(reference_indices_out.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential taskSequential( + taskDataSeq); + 
ASSERT_TRUE(taskSequential.validation()); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + ASSERT_EQ(reference_values_out[0], global_values_out[0]); + ASSERT_EQ(reference_values_out[1], global_values_out[1]); + ASSERT_EQ(reference_indices_out[0], global_indices_out[0]); + ASSERT_EQ(reference_indices_out[1], global_indices_out[1]); + } +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestVectorDouble100) { + MPI_Comm comm = MPI_COMM_WORLD; + int rank; + MPI_Comm_rank(comm, &rank); + + std::vector global_vec; + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (rank == 0) { + const int vector_size = 100; + global_vec = generateRandomVector(vector_size); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (rank == 0) { + std::vector reference_values_out(2); + std::vector reference_indices_out(2); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_values_out.data())); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_values_out.size()); + taskDataSeq->outputs_count.emplace_back(reference_indices_out.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential taskSequential( + taskDataSeq); + ASSERT_TRUE(taskSequential.validation()); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + ASSERT_EQ(reference_values_out[0], global_values_out[0]); + ASSERT_EQ(reference_values_out[1], global_values_out[1]); + ASSERT_EQ(reference_indices_out[0], global_indices_out[0]); + ASSERT_EQ(reference_indices_out[1], global_indices_out[1]); + } +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestVectorFloat100) { + MPI_Comm comm = MPI_COMM_WORLD; + int rank; + MPI_Comm_rank(comm, &rank); + + std::vector global_vec; + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (rank == 0) { + const int vector_size = 100; + global_vec = generateRandomVector(vector_size); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel 
taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (rank == 0) { + std::vector reference_values_out(2); + std::vector reference_indices_out(2); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_values_out.data())); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_values_out.size()); + taskDataSeq->outputs_count.emplace_back(reference_indices_out.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential taskSequential( + taskDataSeq); + ASSERT_TRUE(taskSequential.validation()); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + ASSERT_EQ(reference_values_out[0], global_values_out[0]); + ASSERT_EQ(reference_values_out[1], global_values_out[1]); + ASSERT_EQ(reference_indices_out[0], global_indices_out[0]); + ASSERT_EQ(reference_indices_out[1], global_indices_out[1]); + } +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestVectorWithEqualElements) { + boost::mpi::communicator world; + std::vector global_vec(100, 5); + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_values_out[0], global_values_out[1]); + } +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestSmallVector) { + boost::mpi::communicator world; + std::vector global_vec = {3, 7}; + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(4, std::abs(global_vec[1] - global_vec[0])); + ASSERT_EQ(global_indices_out[0], 0u); + ASSERT_EQ(global_indices_out[1], 1u); + } +} + 
+TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestLargeRandomVector) { + boost::mpi::communicator world; + const int vector_size = 10000; + auto global_vec = generateRandomVector(vector_size); + std::vector global_values_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_values_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + taskDataPar->outputs_count.emplace_back(global_values_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, TestVectorWithNegativeValues) { + boost::mpi::communicator world; + std::vector global_vec = {-5, -10, 0, 3, -2}; + std::vector global_value_out(2); + std::vector global_indices_out(2); + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_value_out.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices_out.data())); + taskDataPar->outputs_count.emplace_back(global_value_out.size()); + taskDataPar->outputs_count.emplace_back(global_indices_out.size()); + } + + moiseev_a_most_different_neighbor_elements_mpi::MostDifferentNeighborElementsParallel taskParallel(taskDataPar); + + ASSERT_TRUE(taskParallel.validation()); + taskParallel.pre_processing(); + taskParallel.run(); + taskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_value_out[0], -10); + ASSERT_EQ(global_value_out[1], 0); + } +} \ No newline at end of file diff --git a/tasks/mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..1857f2fea40 --- /dev/null +++ b/tasks/mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,141 @@ +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace moiseev_a_most_different_neighbor_elements_mpi { + +template +struct Result { + DataType diff; + int64_t l_index; + int64_t r_index; + + template + void serialize(Archive& ar, const unsigned int version) { + ar & diff; + ar & l_index; + ar & r_index; + } +}; + +template +class MostDifferentNeighborElementsParallel : public ppc::core::Task { + public: + explicit MostDifferentNeighborElementsParallel(std::shared_ptr taskData_) : Task(taskData_) {} + + bool pre_processing() override { + internal_order_test(); + rank_ = world.rank(); + size_ = world.size(); + return true; + } + + bool validation() override { + internal_order_test(); + return world.rank() != 0 || (taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2); + } + + bool run() override { + internal_order_test(); + + if (rank_ == 0) { + total_size_ = taskData->inputs_count[0]; + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(tmp_ptr, tmp_ptr + 
total_size_); + } + boost::mpi::broadcast(world, total_size_, 0); + + chunk_size_ = total_size_ / size_; + remainder = total_size_ % size_; + std::vector sizes(world.size()); + std::vector displs(world.size()); + std::vector real_indicies(world.size()); + + if ((int)total_size_ < (int)world.size()) { + sizes[0] = total_size_; + } else { + for (int i = 0; i < world.size(); i++) { + if (i == world.size() - 1) { + if ((chunk_size_ + remainder) > 1) { + sizes[i] = chunk_size_ + remainder; + } + } else { + sizes[i] = chunk_size_ + 1; + } + if (i > 0) { + real_indicies[i] = real_indicies[i - 1] + (sizes[i - 1] - 1); + displs[i] = displs[i - 1] + (sizes[i - 1] - 1); + } + } + } + int actual_chunk_size = sizes[world.rank()]; + displ = real_indicies[world.rank()]; + + local_input_.resize(actual_chunk_size); + boost::mpi::scatterv(world, input_.data(), sizes, displs, local_input_.data(), actual_chunk_size, 0); + + DataType local_max_diff = 0; + int64_t local_l_index = 0; + int64_t local_r_index = 1; + + if (!local_input_.empty()) { + for (size_t i = 0; i < local_input_.size() - 1; ++i) { + DataType diff = std::abs(local_input_[i] - local_input_[i + 1]); + if (diff > local_max_diff) { + local_max_diff = diff; + local_l_index = static_cast(i); + local_r_index = static_cast(i + 1); + } + } + } + + int64_t global_l_index = local_l_index + displ; + int64_t global_r_index = local_r_index + displ; + + Result local_result = {local_max_diff, global_l_index, global_r_index}; + Result global_result; + + boost::mpi::reduce( + world, local_result, global_result, + [](const auto& a, const auto& b) { + return (a.diff > b.diff || (a.diff == b.diff && (a.l_index < b.l_index))) ? a : b; + }, + 0); + + if (rank_ == 0) { + l_elem_index = global_result.l_index; + r_elem_index = global_result.r_index; + } + return true; + } + + bool post_processing() override { + internal_order_test(); + + if (rank_ == 0) { + if (l_elem_index < input_.size() && r_elem_index < input_.size()) { + reinterpret_cast(taskData->outputs[0])[0] = input_[l_elem_index]; + reinterpret_cast(taskData->outputs[0])[1] = input_[r_elem_index]; + reinterpret_cast(taskData->outputs[1])[0] = static_cast(l_elem_index); + reinterpret_cast(taskData->outputs[1])[1] = static_cast(r_elem_index); + } + } + return true; + } + + private: + std::vector input_ = {}; + std::vector local_input_ = {}; + boost::mpi::communicator world; + size_t l_elem_index = 0; + size_t r_elem_index = 0; + size_t chunk_size_ = 0; + size_t displ = 0; + size_t total_size_ = 0; + int remainder = 0; + int rank_ = 0; + int size_ = 0; +}; +} // namespace moiseev_a_most_different_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..ca77e4f9a6c --- /dev/null +++ b/tasks/mpi/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/moiseev_a_most_different_neighbor_elements/include/ops_mpi.hpp" +#include "seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp" + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_results(2, 0); + std::vector global_indices(2, 0); + + std::shared_ptr taskData = std::make_shared(); + int count_size_vector = 10000000; + if (world.rank() == 0) { + 
global_vec.resize(count_size_vector, 1); + taskData->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskData->inputs_count.push_back(global_vec.size()); + taskData->outputs.emplace_back(reinterpret_cast(global_results.data())); + taskData->outputs_count.push_back(global_results.size()); + taskData->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskData->outputs_count.push_back(global_indices.size()); + } + + auto task = + std::make_shared>( + taskData); + ASSERT_EQ(task->validation(), true); + task->pre_processing(); + task->run(); + task->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(task); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(static_cast(count_size_vector), global_vec.size()); + } +} + +TEST(moiseev_a_most_different_neighbor_elements_mpi_test, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_results(2, 0); + std::vector global_indices(2, 0); + + std::shared_ptr taskData = std::make_shared(); + int count_size_vector = 1000000; + if (world.rank() == 0) { + global_vec.resize(count_size_vector, 1); + taskData->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskData->inputs_count.push_back(global_vec.size()); + taskData->outputs.emplace_back(reinterpret_cast(global_results.data())); + taskData->outputs_count.push_back(global_results.size()); + taskData->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskData->outputs_count.push_back(global_indices.size()); + } + + auto task = + std::make_shared>( + taskData); + ASSERT_EQ(task->validation(), true); + task->pre_processing(); + task->run(); + task->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(task); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(static_cast(count_size_vector), global_vec.size()); + } +} diff --git a/tasks/mpi/moiseev_a_most_different_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/moiseev_a_most_different_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tasks/seq/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..b1f65a64b57 --- /dev/null +++ b/tasks/seq/moiseev_a_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,183 @@ +#include + +#include "seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp" + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_int32_t) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = 2 * i; + } + in[234] = 0; + in[235] = 4000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + 
taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + EXPECT_EQ(out[0], 0); + EXPECT_EQ(out[1], 4000); + EXPECT_EQ(out_index[0], 234ull); + EXPECT_EQ(out_index[1], 235ull); +} + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_validate_func) { + std::vector in(125, 1); + std::vector out(2, 0); + std::vector out_index(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, false); +} + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_double) { + std::vector in(25680, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + in[189] = -1000.1; + in[190] = 9000.9; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + EXPECT_NEAR(out[0], -1000.1, 1e-6); + EXPECT_NEAR(out[1], 9000.9, 1e-6); + EXPECT_EQ(out_index[0], 189ull); + EXPECT_EQ(out_index[1], 190ull); +} + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_int8_t) { + std::vector in(250, -1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 2 == 0) { + in[i] = -50; + } else { + in[i] = 50; + } + } + in[5] = 56; + in[6] = -56; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + EXPECT_EQ(out[0], 56); + EXPECT_EQ(out[1], -56); + EXPECT_EQ(out_index[0], 5ull); + EXPECT_EQ(out_index[1], 6ull); +} + 
+TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_int64_t) { + std::vector in(75836, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 3 == 0) { + in[i] = 10; + } + if (i % 3 == 1) { + in[i] = 30; + } + if (i % 3 == 2) { + in[i] = 70; + } + } + in[20] = -1000; + in[21] = 1119; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + EXPECT_EQ(out[0], -1000); + EXPECT_EQ(out[1], 1119); + EXPECT_EQ(out_index[0], 20ull); + EXPECT_EQ(out_index[1], 21ull); +} + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, check_float) { + std::vector in(20, 1.f); + std::vector out(2, 0.f); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] += (i + 1.f) * 2.5f; + } + in[0] = 110.001f; + in[1] = -990.0025f; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential testTask(taskData); + bool isValid = testTask.validation(); + EXPECT_EQ(isValid, true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + EXPECT_NEAR(out[0], 110.001f, 1e-6); + EXPECT_NEAR(out[1], -990.0025f, 1e-6); + EXPECT_EQ(out_index[0], 0ull); + EXPECT_EQ(out_index[1], 1ull); +} diff --git a/tasks/seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp b/tasks/seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..cbd62f07e7a --- /dev/null +++ b/tasks/seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,62 @@ +#include "core/task/include/task.hpp" + +namespace moiseev_a_most_different_neighbor_elements_seq { + +template +class MostDifferentNeighborElementsSequential : public ppc::core::Task { + public: + explicit MostDifferentNeighborElementsSequential(std::shared_ptr taskData_) + : Task(taskData_), taskData(taskData_) {} + + bool pre_processing() override { + internal_order_test(); + + auto tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + + return true; + } + + bool validation() override { + internal_order_test(); + return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; + } + + bool run() override { + internal_order_test(); + if (input_.size() < 2) return false; + + DataType max_diff = 0; + l_elem_index = 0; + r_elem_index = 1; + + for (size_t i = 0; i + 1 < input_.size(); ++i) { + DataType diff = std::abs(input_[i] - input_[i + 1]); + if (diff > max_diff) { + max_diff = diff; + l_elem_index = i; + 
r_elem_index = i + 1; + } + } + return true; + } + + bool post_processing() override { + internal_order_test(); + if (taskData->outputs_count[0] >= 2 && taskData->outputs_count[1] >= 2) { + reinterpret_cast(taskData->outputs[0])[0] = input_[l_elem_index]; + reinterpret_cast(taskData->outputs[0])[1] = input_[r_elem_index]; + reinterpret_cast(taskData->outputs[1])[0] = static_cast(l_elem_index); + reinterpret_cast(taskData->outputs[1])[1] = static_cast(r_elem_index); + return true; + } + return false; + } + + private: + std::shared_ptr taskData; + std::vector input_; + size_t l_elem_index; + size_t r_elem_index; +}; +} // namespace moiseev_a_most_different_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d8910698f45 --- /dev/null +++ b/tasks/seq/moiseev_a_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,86 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/moiseev_a_most_different_neighbor_elements/include/ops_seq.hpp" + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, test_pipeline_run) { + const int num_elements = 10000000; + + std::vector in(num_elements); + std::vector out(2, 0); + std::vector out_index(2, 0); + + for (size_t i = 0; i < num_elements; ++i) { + in[i] = i * 2; + } + in[0] = -5000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTask = std::make_shared< + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], -5000); + ASSERT_EQ(out[1], 2); +} + +TEST(moiseev_a_most_different_neighbor_elements_seq_test, test_task_run) { + const int num_elements = 1000000; + + std::vector in(num_elements); + std::vector out(2, 0); + std::vector out_index(2, 0); + + for (size_t i = 0; i < num_elements; ++i) { + in[i] = i * 2; + } + in[0] = -5000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTask = std::make_shared< + moiseev_a_most_different_neighbor_elements_seq::MostDifferentNeighborElementsSequential>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = 
std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTask);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(out[0], -5000);
+  ASSERT_EQ(out[1], 2);
+}
diff --git a/tasks/seq/moiseev_a_most_different_neighbor_elements/src/ops_seq.cpp b/tasks/seq/moiseev_a_most_different_neighbor_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..e69de29bb2d

From 8e24f2e6b7fdd7217cfb7773af02377864b26b03 Mon Sep 17 00:00:00 2001
From: 22380240 <121048824+22380240@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:44:09 +0300
Subject: =?UTF-8?q?=E2=80=8E=D0=A0=D0=B0=D0=BC=D1=81=20?=
 =?UTF-8?q?=D0=A1=D0=B5=D1=80=D0=B3=D0=B5=D0=B9.=20=D0=97=D0=B0=D0=B4?=
 =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?=
 =?UTF-8?q?=D1=82=2023.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D0=B5=D1=82=20?=
 =?UTF-8?q?=D1=87=D0=B0=D1=81=D1=82=D0=BE=D1=82=D1=8B=20=D1=81=D0=B8=D0=BC?=
 =?UTF-8?q?=D0=B2=D0=BE=D0=BB=D0=B0=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE?=
 =?UTF-8?q?=D0=BA=D0=B5=20(#65)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task:
Input data: a string and a character. The number of occurrences of the
character in the string is counted with std::count. Output data: the
number of occurrences of the character in the string.

Description of the parallel task:
The input string is divided evenly between the processes. When the
string length is not evenly divisible by the number of processes, the
leftover characters are appended to the substring of the last process.
Each process counts the target character in its own substring, and the
per-process counts are summed with reduce.
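For illustration only (not part of the patch below): a compact,
self-contained sketch of the scheme just described. It assumes
boost::mpi; for brevity every rank already holds the same in-memory
string (the task itself ships the data through taskData and MPI
instead), so only the split arithmetic, the local std::count, and the
final reduce are shown.

#include <algorithm>
#include <boost/mpi.hpp>
#include <functional>
#include <iostream>
#include <string>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Hypothetical input; the real task receives these through taskData.
  const std::string text = "abracadabra";
  const char target = 'a';

  // Even split; the last rank also takes the remainder, as described above.
  const int base = static_cast<int>(text.size()) / world.size();
  const int begin = world.rank() * base;
  const int end =
      (world.rank() == world.size() - 1) ? static_cast<int>(text.size()) : begin + base;

  // Each process counts occurrences in its own slice only.
  const int local_count =
      static_cast<int>(std::count(text.begin() + begin, text.begin() + end, target));

  // Sum the per-process counts on rank 0.
  int total = 0;
  boost::mpi::reduce(world, local_count, total, std::plus<int>(), 0);

  if (world.rank() == 0) {
    std::cout << "'" << target << "' occurs " << total << " times" << std::endl;
  }
  return 0;
}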
--------- Co-authored-by: ‎ <‎> --- .../rams_s_char_frequency/func_tests/main.cpp | 264 ++++++++++++++++++ .../rams_s_char_frequency/include/ops_mpi.hpp | 47 ++++ .../rams_s_char_frequency/perf_tests/main.cpp | 100 +++++++ .../mpi/rams_s_char_frequency/src/ops_mpi.cpp | 98 +++++++ .../rams_s_char_frequency/func_tests/main.cpp | 106 +++++++ .../rams_s_char_frequency/include/ops_seq.hpp | 25 ++ .../rams_s_char_frequency/perf_tests/main.cpp | 91 ++++++ .../seq/rams_s_char_frequency/src/ops_seq.cpp | 34 +++ 8 files changed, 765 insertions(+) create mode 100644 tasks/mpi/rams_s_char_frequency/func_tests/main.cpp create mode 100644 tasks/mpi/rams_s_char_frequency/include/ops_mpi.hpp create mode 100644 tasks/mpi/rams_s_char_frequency/perf_tests/main.cpp create mode 100644 tasks/mpi/rams_s_char_frequency/src/ops_mpi.cpp create mode 100644 tasks/seq/rams_s_char_frequency/func_tests/main.cpp create mode 100644 tasks/seq/rams_s_char_frequency/include/ops_seq.hpp create mode 100644 tasks/seq/rams_s_char_frequency/perf_tests/main.cpp create mode 100644 tasks/seq/rams_s_char_frequency/src/ops_seq.cpp diff --git a/tasks/mpi/rams_s_char_frequency/func_tests/main.cpp b/tasks/mpi/rams_s_char_frequency/func_tests/main.cpp new file mode 100644 index 00000000000..7a9db4566dc --- /dev/null +++ b/tasks/mpi/rams_s_char_frequency/func_tests/main.cpp @@ -0,0 +1,264 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/rams_s_char_frequency/include/ops_mpi.hpp" + +TEST(rams_s_char_frequency_mpi, several_occurrences_of_target) { + boost::mpi::communicator world; + std::string global_in = "abcdabcda"; + std::vector global_in_target(1, 'a'); + + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + rams_s_char_frequency_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataSeq->inputs_count.emplace_back(global_in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataSeq->inputs_count.emplace_back(global_in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + rams_s_char_frequency_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(rams_s_char_frequency_mpi, no_occurrences_of_target) { + boost::mpi::communicator world; + std::string global_in = "bcdbcd"; + std::vector 
global_in_target(1, 'a'); + + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + rams_s_char_frequency_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataSeq->inputs_count.emplace_back(global_in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataSeq->inputs_count.emplace_back(global_in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + rams_s_char_frequency_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(rams_s_char_frequency_mpi, empty_input_string) { + boost::mpi::communicator world; + std::string global_in; + std::vector global_in_target(1, 'a'); + + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + rams_s_char_frequency_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataSeq->inputs_count.emplace_back(global_in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataSeq->inputs_count.emplace_back(global_in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + rams_s_char_frequency_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(rams_s_char_frequency_mpi, large_input_string) { + boost::mpi::communicator world; + std::string common_string = "abc"; + std::string global_in; + for (int i = 0; i < 9999; i++) { + global_in += common_string; + } + std::vector global_in_target(1, 'a'); + + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + rams_s_char_frequency_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataSeq->inputs_count.emplace_back(global_in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataSeq->inputs_count.emplace_back(global_in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + rams_s_char_frequency_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(rams_s_char_frequency_mpi, random_input_string) { + boost::mpi::communicator world; + std::string chars = "1234567890abcdefghijklmnopqrstuvwxyz!@#$%^&*()"; + std::string global_in; + std::random_device dev; + std::mt19937 gen(dev()); + for (int i = 0; i < 9999; i++) { + global_in += chars[gen() % chars.length()]; + } + std::vector global_in_target(1, 'a'); + + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + rams_s_char_frequency_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataSeq->inputs_count.emplace_back(global_in.size()); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataSeq->inputs_count.emplace_back(global_in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + rams_s_char_frequency_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} diff --git a/tasks/mpi/rams_s_char_frequency/include/ops_mpi.hpp b/tasks/mpi/rams_s_char_frequency/include/ops_mpi.hpp new file mode 100644 index 00000000000..f99f91b5319 --- /dev/null +++ b/tasks/mpi/rams_s_char_frequency/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace rams_s_char_frequency_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + char target_; + int res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_, local_input_; + char target_; + int res, local_res; + boost::mpi::communicator world; +}; + +} // namespace rams_s_char_frequency_mpi diff --git a/tasks/mpi/rams_s_char_frequency/perf_tests/main.cpp b/tasks/mpi/rams_s_char_frequency/perf_tests/main.cpp new file mode 100644 index 00000000000..2cdcc11d1e5 --- /dev/null +++ b/tasks/mpi/rams_s_char_frequency/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/rams_s_char_frequency/include/ops_mpi.hpp" + +TEST(rams_s_char_frequency_mpi_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::string common_string = "abc"; + std::string global_in; + for (int i = 0; i < 999999; i++) { + global_in += common_string; + } + std::vector global_in_target(1, 'a'); + std::vector global_out(1, 0); + int expected_count = 999999; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + 
perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, global_out[0]); + } +} + +TEST(rams_s_char_frequency_mpi_perf_test, test_task_run) { + boost::mpi::communicator world; + std::string common_string = "abc"; + std::string global_in; + for (int i = 0; i < 999999; i++) { + global_in += common_string; + } + std::vector global_in_target(1, 'a'); + std::vector global_out(1, 0); + int expected_count = 999999; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in.data())); + taskDataPar->inputs_count.emplace_back(global_in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_in_target.data())); + taskDataPar->inputs_count.emplace_back(global_in_target.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, global_out[0]); + } +} diff --git a/tasks/mpi/rams_s_char_frequency/src/ops_mpi.cpp b/tasks/mpi/rams_s_char_frequency/src/ops_mpi.cpp new file mode 100644 index 00000000000..0b407b53d2e --- /dev/null +++ b/tasks/mpi/rams_s_char_frequency/src/ops_mpi.cpp @@ -0,0 +1,98 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/rams_s_char_frequency/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include "boost/mpi/collectives/scatterv.hpp" + +using namespace std::chrono_literals; + +bool rams_s_char_frequency_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + target_ = *reinterpret_cast(taskData->inputs[1]); + res = 0; + return true; +} + +bool rams_s_char_frequency_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1; +} + +bool rams_s_char_frequency_mpi::TestMPITaskSequential::run() { + internal_order_test(); + res = std::count(input_.begin(), input_.end(), target_); + return true; +} + +bool rams_s_char_frequency_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool rams_s_char_frequency_mpi::TestMPITaskParallel::pre_processing() { + 
internal_order_test();
+
+  if (world.rank() == 0) {
+    input_ = std::string(reinterpret_cast<char*>(taskData->inputs[0]), taskData->inputs_count[0]);
+    target_ = *reinterpret_cast<char*>(taskData->inputs[1]);
+  }
+
+  res = 0;
+  local_res = 0;
+  return true;
+}
+
+bool rams_s_char_frequency_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool rams_s_char_frequency_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  unsigned int total = 0;
+  if (world.rank() == 0) {
+    total = taskData->inputs_count[0];
+  }
+  broadcast(world, total, 0);
+  broadcast(world, target_, 0);
+  unsigned int delta = total / world.size();
+  unsigned int overflow = total % world.size();
+
+  std::vector<int> sizes(world.size(), delta);
+  sizes[world.size() - 1] = delta + overflow;
+  std::vector<int> displs(world.size());
+  for (int i = 1; i < world.size(); i++) {
+    displs[i] = displs[i - 1] + delta;
+  }
+
+  unsigned int local_delta = sizes[world.rank()];
+  local_input_.resize(local_delta);
+
+  boost::mpi::scatterv(world, input_.data(), sizes, displs, local_input_.data(), local_delta, 0);
+  local_res = std::count(local_input_.begin(), local_input_.end(), target_);
+  reduce(world, local_res, res, std::plus<>(), 0);
+  return true;
+}
+
+bool rams_s_char_frequency_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  }
+  return true;
+}
diff --git a/tasks/seq/rams_s_char_frequency/func_tests/main.cpp b/tasks/seq/rams_s_char_frequency/func_tests/main.cpp
new file mode 100644
index 00000000000..8b8d684343a
--- /dev/null
+++ b/tasks/seq/rams_s_char_frequency/func_tests/main.cpp
@@ -0,0 +1,106 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/rams_s_char_frequency/include/ops_seq.hpp"
+
+TEST(rams_s_char_frequency_seq, several_occurrences_of_target) {
+  std::string in = "abcdabcda";
+  std::vector<char> in_target(1, 'a');
+  std::vector<int> out(1, 0);
+  int expected_count = 3;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_target.data()));
+  taskDataSeq->inputs_count.emplace_back(in_target.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  rams_s_char_frequency_seq::CharFrequencyTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expected_count, out[0]);
+}
+
+TEST(rams_s_char_frequency_seq, no_occurrences_of_target) {
+  std::string in = "bcdbcd";
+  std::vector<char> in_target(1, 'a');
+  std::vector<int> out(1, 0);
+  int expected_count = 0;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_target.data()));
+  taskDataSeq->inputs_count.emplace_back(in_target.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  
taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rams_s_char_frequency_seq::CharFrequencyTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(rams_s_char_frequency_seq, empty_input_string) { + std::string in; + std::vector in_target(1, 'a'); + std::vector out(1, 0); + int expected_count = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_target.data())); + taskDataSeq->inputs_count.emplace_back(in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rams_s_char_frequency_seq::CharFrequencyTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(rams_s_char_frequency_seq, large_input_string) { + std::string common_string = "abc"; + std::string in; + for (int i = 0; i < 9999; i++) { + in += common_string; + } + std::vector in_target(1, 'a'); + std::vector out(1, 0); + int expected_count = 9999; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_target.data())); + taskDataSeq->inputs_count.emplace_back(in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rams_s_char_frequency_seq::CharFrequencyTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} diff --git a/tasks/seq/rams_s_char_frequency/include/ops_seq.hpp b/tasks/seq/rams_s_char_frequency/include/ops_seq.hpp new file mode 100644 index 00000000000..c55990148fd --- /dev/null +++ b/tasks/seq/rams_s_char_frequency/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace rams_s_char_frequency_seq { + +class CharFrequencyTaskSequential : public ppc::core::Task { + public: + explicit CharFrequencyTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + char target_; + int res; +}; + +} // namespace rams_s_char_frequency_seq diff --git a/tasks/seq/rams_s_char_frequency/perf_tests/main.cpp b/tasks/seq/rams_s_char_frequency/perf_tests/main.cpp new file mode 100644 index 00000000000..c1398ba089f --- /dev/null +++ b/tasks/seq/rams_s_char_frequency/perf_tests/main.cpp @@ -0,0 +1,91 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/rams_s_char_frequency/include/ops_seq.hpp" + 
+TEST(rams_s_char_frequency_seq_perf_test, test_pipeline_run) { + std::string common_string = "abc"; + std::string in; + for (int i = 0; i < 999999; i++) { + in += common_string; + } + std::vector in_target(1, 'a'); + std::vector out(1, 0); + int expected_count = 999999; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_target.data())); + taskDataSeq->inputs_count.emplace_back(in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(rams_s_char_frequency_seq_perf_test, test_task_run) { + std::string common_string = "abc"; + std::string in; + for (int i = 0; i < 999999; i++) { + in += common_string; + } + std::vector in_target(1, 'a'); + std::vector out(1, 0); + int expected_count = 999999; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_target.data())); + taskDataSeq->inputs_count.emplace_back(in_target.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); +} diff --git a/tasks/seq/rams_s_char_frequency/src/ops_seq.cpp b/tasks/seq/rams_s_char_frequency/src/ops_seq.cpp new file mode 100644 index 00000000000..5d90703c63b --- /dev/null +++ b/tasks/seq/rams_s_char_frequency/src/ops_seq.cpp @@ -0,0 +1,34 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/rams_s_char_frequency/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool rams_s_char_frequency_seq::CharFrequencyTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = 
std::string(reinterpret_cast<char*>(taskData->inputs[0]), taskData->inputs_count[0]);
+  target_ = *reinterpret_cast<char*>(taskData->inputs[1]);
+  res = 0;
+  return true;
+}
+
+bool rams_s_char_frequency_seq::CharFrequencyTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1;
+}
+
+bool rams_s_char_frequency_seq::CharFrequencyTaskSequential::run() {
+  internal_order_test();
+  res = std::count(input_.begin(), input_.end(), target_);
+  return true;
+}
+
+bool rams_s_char_frequency_seq::CharFrequencyTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}

From 4c76b9959d041328954d2b20521cca27b1634718 Mon Sep 17 00:00:00 2001
From: Alexey-Solovev <149035736+Alexey-Solovev@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:46:02 +0300
Subject: [PATCH 067/155] =?UTF-8?q?=D0=A1=D0=BE=D0=BB=D0=BE=D0=B2=D1=8C?=
 =?UTF-8?q?=D0=B5=D0=B2=20=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B5=D0=B9.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2024.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87?=
 =?UTF-8?q?=D1=91=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=81=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.?=
 =?UTF-8?q?=20(#71)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential algorithm:
The algorithm walks over the string and increments the variable res by one
whenever the character is a space, or a period in the case of the last word
of the string.

Description of the MPI algorithm:
Process zero distributes the data between all the processes; each process
performs the count on its own part of the string, incrementing the variable
l_res by one whenever the character is a space or a period; all local
results are then summed into the variable res.
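A minimal sketch of the separator-counting idea described above (an
editorial illustration, not part of the patch; the function name is an
assumption): since every word ends with a space or with the final period,
counting those characters counts the words.

    #include <algorithm>
    #include <string>
    // Counts words in a text where words are separated by spaces and the
    // last word is terminated by a period.
    int count_words(const std::string& text) {
      return static_cast<int>(
          std::count_if(text.begin(), text.end(), [](char c) { return c == ' ' || c == '.'; }));
    }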
---
 .../solovev_a_word_count/func_tests/main.cpp  | 260 ++++++++++++++++++
 .../solovev_a_word_count/include/ops_mpi.hpp  |  45 +++
 .../solovev_a_word_count/perf_tests/main.cpp  |  85 ++++++
 .../mpi/solovev_a_word_count/src/ops_mpi.cpp  |  94 +++++++
 .../solovev_a_word_count/func_tests/main.cpp  | 141 ++++++++++
 .../solovev_a_word_count/include/ops_seq.hpp  |  24 ++
 .../solovev_a_word_count/perf_tests/main.cpp  |  82 ++++++
 .../seq/solovev_a_word_count/src/ops_seq.cpp  |  35 +++
 8 files changed, 766 insertions(+)
 create mode 100644 tasks/mpi/solovev_a_word_count/func_tests/main.cpp
 create mode 100644 tasks/mpi/solovev_a_word_count/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/solovev_a_word_count/perf_tests/main.cpp
 create mode 100644 tasks/mpi/solovev_a_word_count/src/ops_mpi.cpp
 create mode 100644 tasks/seq/solovev_a_word_count/func_tests/main.cpp
 create mode 100644 tasks/seq/solovev_a_word_count/include/ops_seq.hpp
 create mode 100644 tasks/seq/solovev_a_word_count/perf_tests/main.cpp
 create mode 100644 tasks/seq/solovev_a_word_count/src/ops_seq.cpp

diff --git a/tasks/mpi/solovev_a_word_count/func_tests/main.cpp b/tasks/mpi/solovev_a_word_count/func_tests/main.cpp
new file mode 100644
index 00000000000..6bd53f8aac4
--- /dev/null
+++ b/tasks/mpi/solovev_a_word_count/func_tests/main.cpp
@@ -0,0 +1,260 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/solovev_a_word_count/include/ops_mpi.hpp"
+
+std::vector<char> create_text(int quan_words) {
+  std::vector<char> res;
+  std::string word = "word ";
+  std::string last = "word.";
+  for (int i = 0; i < quan_words - 1; i++)
+    for (unsigned long int symbol = 0; symbol < word.length(); symbol++) {
+      res.push_back(word[symbol]);
+    }
+  for (unsigned long int symbol = 0; symbol < last.length(); symbol++) {
+    res.push_back(last[symbol]);
+  }
+  return res;
+}
+
+TEST(solovev_a_word_count_mpi, test_0_word) {
+  std::vector<char> input = {};
+  std::vector<int> global_out(1, 0);
+  boost::mpi::communicator world;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+
+    solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+    ASSERT_EQ(testMpiTaskParallel.validation(), false);
+  }
+}
+
+TEST(solovev_a_word_count_mpi, test_5_word) {
+  std::vector<char> input = create_text(5);
+  std::vector<int> global_out(1, 0);
+  boost::mpi::communicator world;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> ref_out(1, 0);
+
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_out.data()));
+    taskDataSeq->outputs_count.emplace_back(ref_out.size());
+
+    solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(ref_out[0], global_out[0]);
+  }
+}
+
+TEST(solovev_a_word_count_mpi, test_120_word) {
+  std::vector<char> input = create_text(120);
+  std::vector<int> global_out(1, 0);
+  boost::mpi::communicator world;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> ref_out(1, 0);
+
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_out.data()));
+    taskDataSeq->outputs_count.emplace_back(ref_out.size());
+
+    solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+ testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_out[0], global_out[0]); + } +} + +TEST(solovev_a_word_count_mpi, test_300_words) { + std::vector input = create_text(300); + std::vector global_out(1, 0); + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_out[0], global_out[0]); + } +} + +TEST(solovev_a_word_count_mpi, test_480_words) { + std::vector input = create_text(480); + std::vector global_out(1, 0); + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_out[0], global_out[0]); + } +} + +TEST(solovev_a_word_count_mpi, test_600_words) { + std::vector input = create_text(600); + std::vector global_out(1, 0); + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + 
solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_out[0], global_out[0]); + } +} + +TEST(solovev_a_word_count_mpi, test_1200_word) { + std::vector input = create_text(1200); + std::vector global_out(1, 0); + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + solovev_a_word_count_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + solovev_a_word_count_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_out[0], global_out[0]); + } +} diff --git a/tasks/mpi/solovev_a_word_count/include/ops_mpi.hpp b/tasks/mpi/solovev_a_word_count/include/ops_mpi.hpp new file mode 100644 index 00000000000..4aaf3c6db3a --- /dev/null +++ b/tasks/mpi/solovev_a_word_count/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovev_a_word_count_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + boost::mpi::communicator world; + std::vector input_; + int res{}; + std::vector l_input_; + int l_res{}; +}; + +} // namespace solovev_a_word_count_mpi diff --git 
a/tasks/mpi/solovev_a_word_count/perf_tests/main.cpp b/tasks/mpi/solovev_a_word_count/perf_tests/main.cpp new file mode 100644 index 00000000000..904463aa917 --- /dev/null +++ b/tasks/mpi/solovev_a_word_count/perf_tests/main.cpp @@ -0,0 +1,85 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/solovev_a_word_count/include/ops_mpi.hpp" + +std::vector create_text(int quan_words) { + std::vector res; + std::string word = "word "; + std::string last = "word."; + for (int i = 0; i < quan_words - 1; i++) + for (unsigned long int symbol = 0; symbol < word.length(); symbol++) { + res.push_back(word[symbol]); + } + for (unsigned long int symbol = 0; symbol < last.length(); symbol++) { + res.push_back(last[symbol]); + } + return res; +} + +std::vector input_text = create_text(60000); + +TEST(solovev_a_word_count_mpi_perf_test, test_pipeline_run) { + std::vector input = input_text; + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 60000); + } +} + +TEST(solovev_a_word_count_mpi_perf_test, test_task_run) { + std::vector input = input_text; + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 60000); + } +} diff --git a/tasks/mpi/solovev_a_word_count/src/ops_mpi.cpp b/tasks/mpi/solovev_a_word_count/src/ops_mpi.cpp new file mode 100644 index 00000000000..a678080c99a --- /dev/null +++ b/tasks/mpi/solovev_a_word_count/src/ops_mpi.cpp @@ -0,0 +1,94 @@ +#include "mpi/solovev_a_word_count/include/ops_mpi.hpp" + +#include +#include +#include +#include + +namespace solovev_a_word_count_mpi { + +bool 
solovev_a_word_count_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<char>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+  std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin());
+  res = 0;
+  return true;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (char symbol : input_) {
+    if (symbol == ' ' || symbol == '.') {
+      res++;
+    }
+  }
+  return true;
+}
+bool solovev_a_word_count_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    input_ = std::vector<char>(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+    std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin());
+  }
+  res = 0;
+  l_res = 0;
+  return true;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return (taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1);
+  }
+  return true;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  unsigned int total = 0;
+  if (world.rank() == 0) {
+    total = taskData->inputs_count[0];
+  }
+  boost::mpi::broadcast(world, total, 0);
+  unsigned int delta = total / world.size();
+  unsigned int tail = total % world.size();
+  // The last process also takes the remainder of the integer division.
+  unsigned int local_size = (world.rank() == world.size() - 1) ? delta + tail : delta;
+  if (world.rank() == 0) {
+    for (int p = 1; p < world.size(); p++) {
+      unsigned int part = (p == world.size() - 1) ? delta + tail : delta;
+      world.send(p, 0, input_.data() + p * delta, part);
+    }
+  }
+  l_input_.resize(local_size);
+  if (world.rank() == 0) {
+    l_input_ = std::vector<char>(input_.begin(), input_.begin() + local_size);
+  } else {
+    world.recv(0, 0, l_input_.data(), local_size);
+  }
+  // Each process counts separators only in its own local chunk.
+  for (char symbol : l_input_) {
+    if (symbol == ' ' || symbol == '.') {
+      l_res++;
+    }
+  }
+  boost::mpi::reduce(world, l_res, res, std::plus<>(), 0);
+  return true;
+}
+
+bool solovev_a_word_count_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  }
+  return true;
+}
+
+}  // namespace solovev_a_word_count_mpi
diff --git a/tasks/seq/solovev_a_word_count/func_tests/main.cpp b/tasks/seq/solovev_a_word_count/func_tests/main.cpp
new file mode 100644
index 00000000000..3cc0baa955d
--- /dev/null
+++ b/tasks/seq/solovev_a_word_count/func_tests/main.cpp
@@ -0,0 +1,141 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/solovev_a_word_count/include/ops_seq.hpp"
+
+std::vector<char> create_text(int quan_words) {
+  std::vector<char> res;
+  std::string word = "word ";
+  std::string last = "word.";
+  for (int i = 0; i < quan_words - 1; i++)
+    for (unsigned long int symbol = 0; symbol < word.length(); symbol++) {
+      res.push_back(word[symbol]);
+    }
+  for (unsigned long int symbol = 0; symbol < last.length(); symbol++) {
+    res.push_back(last[symbol]);
+  }
+  return res;
+}
+
+TEST(solovev_a_word_count_seq, test_0_word) {
+  std::vector<char> input = {};
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+  taskDataSeq->inputs_count.emplace_back(input.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  
solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(solovev_a_word_count_seq, test_5_word) { + std::vector input = create_text(5); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 5); +} + +TEST(solovev_a_word_count_seq, test_120_word) { + std::vector input = create_text(120); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 120); +} + +TEST(solovev_a_word_count_seq, test_300_words) { + std::vector input = create_text(300); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 300); +} + +TEST(solovev_a_word_count_seq, test_480_words) { + std::vector input = create_text(480); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 480); +} + +TEST(solovev_a_word_count_seq, test_600_words) { + std::vector input = create_text(600); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 
600); +} + +TEST(solovev_a_word_count_seq, test_1200_words) { + std::vector input = create_text(1200); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + solovev_a_word_count_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 1200); +} diff --git a/tasks/seq/solovev_a_word_count/include/ops_seq.hpp b/tasks/seq/solovev_a_word_count/include/ops_seq.hpp new file mode 100644 index 00000000000..ea5f1ade10f --- /dev/null +++ b/tasks/seq/solovev_a_word_count/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovev_a_word_count_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +} // namespace solovev_a_word_count_seq diff --git a/tasks/seq/solovev_a_word_count/perf_tests/main.cpp b/tasks/seq/solovev_a_word_count/perf_tests/main.cpp new file mode 100644 index 00000000000..e3797a260fd --- /dev/null +++ b/tasks/seq/solovev_a_word_count/perf_tests/main.cpp @@ -0,0 +1,82 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/solovev_a_word_count/include/ops_seq.hpp" + +std::vector create_text(int quan_words) { + std::vector res; + std::string word = "word "; + std::string last = "word."; + for (int i = 0; i < quan_words - 1; i++) + for (unsigned long int symbol = 0; symbol < word.length(); symbol++) { + res.push_back(word[symbol]); + } + for (unsigned long int symbol = 0; symbol < last.length(); symbol++) { + res.push_back(last[symbol]); + } + return res; +} + +std::vector input_text = create_text(60000); + +TEST(solovev_a_word_count_seq_perf_test, test_pipeline_run) { + std::vector input = input_text; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(out[0], 60000); +} + +TEST(solovev_a_word_count_seq_perf_test, test_task_run) { + std::vector input = input_text; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(input.data()));
+  taskDataSeq->inputs_count.emplace_back(input.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential = std::make_shared<solovev_a_word_count_seq::TestTaskSequential>(taskDataSeq);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 1000;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(out[0], 60000);
+}
diff --git a/tasks/seq/solovev_a_word_count/src/ops_seq.cpp b/tasks/seq/solovev_a_word_count/src/ops_seq.cpp
new file mode 100644
index 00000000000..353ff8ba507
--- /dev/null
+++ b/tasks/seq/solovev_a_word_count/src/ops_seq.cpp
@@ -0,0 +1,35 @@
+#include "seq/solovev_a_word_count/include/ops_seq.hpp"
+
+namespace solovev_a_word_count_seq {
+
+bool solovev_a_word_count_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<char>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<char*>(taskData->inputs[0]);
+  std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin());
+  res = 0;
+  return true;
+}
+
+bool solovev_a_word_count_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool solovev_a_word_count_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (char symbol : input_) {
+    if (symbol == ' ' || symbol == '.') {
+      res++;
+    }
+  }
+  return true;
+}
+
+bool solovev_a_word_count_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+}  // namespace solovev_a_word_count_seq

From 86810963614813af50bcad332f3c86fdaa3db59e Mon Sep 17 00:00:00 2001
From: Yaroslav <104485749+Yarik3008@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:46:49 +0300
Subject: [PATCH 068/155] =?UTF-8?q?=D0=9A=D0=BE=D0=BD=D0=B4=D1=80=D0=B0?=
 =?UTF-8?q?=D1=82=D1=8C=D0=B5=D0=B2=20=D0=AF=D1=80=D0=BE=D1=81=D0=BB=D0=B0?=
 =?UTF-8?q?=D0=B2.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?=
 =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2016.=20=D0=9D=D0=B0=D1=85?=
 =?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA?=
 =?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7?=
 =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20?=
 =?UTF-8?q?=D1=81=D1=82=D0=BE=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0?=
 =?UTF-8?q?=D1=82=D1=80=D0=B8=D1=86=D1=8B=20(#76)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task
- the incoming matrix is transposed while it is read in, for convenience
- a sequential row-by-row search for the maxima

Description of the MPI task
- the incoming matrix is transposed while it is read in, for convenience
- the matrix is distributed row by row to the available processes; the
  number of rows sent depends on the process rank (if the rank is less than
  the remainder of dividing the row count by the process count, step_ + 1
  rows are sent; see the sketch after this list)
- each process sequentially searches its rows for the maxima
- the per-process results are sent to the main process, where they are
  merged and written into the final answer
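A minimal sketch of the row-distribution rule above (an editorial
illustration, not part of the patch; the helper name is an assumption):
ranks below rows % procs receive one extra row.

    #include <cstdint>
    // Rows assigned to a rank: step rows each, plus one more for the first
    // (rows % procs) ranks.
    uint32_t rows_for_rank(uint32_t rows, uint32_t procs, uint32_t rank) {
      uint32_t step = rows / procs;
      uint32_t remain = rows % procs;
      return rank < remain ? step + 1 : step;
    }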
The number of rows sent depends on the process rank (if the rank is less than the remainder of dividing the row count by the process count, step_ + 1 rows are sent) - each process sequentially scans its received rows for maxima - the per-process results are sent to the main process, where they are merged and written into the final answer --------- Co-authored-by: Vladislav Ermolaev <71265338+ermolaevv@users.noreply.github.com> --- .../func_tests/main.cpp | 125 +++++++++++++ .../include/ops_mpi.hpp | 51 ++++++ .../perf_tests/main.cpp | 127 ++++++++++++++ .../src/ops_mpi.cpp | 165 ++++++++++++++++++ .../func_tests/main.cpp | 107 ++++++++++++ .../include/ops_seq.hpp | 25 +++ .../perf_tests/main.cpp | 117 +++++++++++++ .../src/ops_seq.cpp | 55 ++++++ 8 files changed, 772 insertions(+) create mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp create mode 100644 tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..a76cc30f3da --- /dev/null +++ b/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp @@ -0,0 +1,125 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp" + +std::vector> getRandomMatrix(uint32_t row, uint32_t col) { + int32_t low = -200; + int32_t high = 200; + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> mtrx(row, std::vector(col)); + for (uint32_t i = 0; i < row; i++) { + for (uint32_t j = 0; j < col; j++) { + mtrx[i][j] = low + gen() % (high - low + 1); + } + } + return mtrx; +} + +void runTask(ppc::core::Task& task) { + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); +} + +void fillTaskData(std::shared_ptr& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) { + for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast(mtrxRow.data())); + taskData->inputs_count.emplace_back(row); + taskData->inputs_count.emplace_back(col); + taskData->outputs.emplace_back(reinterpret_cast(res.data())); + taskData->outputs_count.emplace_back(res.size()); +} + +TEST(kondratev_ya_max_col_matrix_mpi, test_1) { + uint32_t row = 100; + uint32_t col = 100; + + boost::mpi::communicator world; + std::vector res(col); + std::vector> mtrx; + + auto taskDataPar = std::make_shared(); + if (world.rank() == 0) { + mtrx = getRandomMatrix(row, col); + fillTaskData(taskDataPar, row, col, mtrx, res); + } + + kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + runTask(testMpiTaskParallel); + + if (world.rank() == 0) { + std::vector ref(col); + std::shared_ptr taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, ref); + + kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + runTask(testMpiTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i],
ref[i]); + } +} + +TEST(kondratev_ya_max_col_matrix_mpi, test_2) { + uint32_t row = 1000; + uint32_t col = 50; + + boost::mpi::communicator world; + std::vector res(col); + std::vector> mtrx; + + auto taskDataPar = std::make_shared(); + if (world.rank() == 0) { + mtrx = getRandomMatrix(row, col); + fillTaskData(taskDataPar, row, col, mtrx, res); + } + + kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + runTask(testMpiTaskParallel); + if (world.rank() == 0) { + std::vector ref(col); + std::shared_ptr taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, ref); + + kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + runTask(testMpiTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); + } +} + +TEST(kondratev_ya_max_col_matrix_mpi, test_3) { + uint32_t row = 500; + uint32_t col = 1000; + + boost::mpi::communicator world; + std::vector res(col); + std::vector> mtrx; + + auto taskDataPar = std::make_shared(); + if (world.rank() == 0) { + mtrx = getRandomMatrix(row, col); + fillTaskData(taskDataPar, row, col, mtrx, res); + } + + kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + runTask(testMpiTaskParallel); + + if (world.rank() == 0) { + std::vector ref(col); + std::shared_ptr taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, ref); + + kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + runTask(testMpiTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp b/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..f5880d9ef44 --- /dev/null +++ b/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kondratev_ya_max_col_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + + uint32_t row_; + uint32_t col_; + uint32_t step_; + uint32_t remain_; + boost::mpi::communicator world; +}; + +} // namespace kondratev_ya_max_col_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..2ac3a61ffed --- /dev/null +++ b/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp @@ -0,0 +1,127 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp" + 
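// [Editor's illustrative sketch, not part of the original patch; assumes
// <vector> is among the stripped includes above.] The commit message describes
// the distribution scheme used below: with C columns and P processes, every
// rank receives step = C / P columns and the first C % P ranks get one extra.
// A self-contained helper (hypothetical name) computing the per-rank counts:
static std::vector<int> column_counts_sketch(int columns, int procs) {
  std::vector<int> counts(procs, columns / procs);
  for (int r = 0; r < columns % procs; r++) {
    counts[r]++;  // spread the remainder over the lowest ranks
  }
  return counts;
}
// The counts always sum back to `columns`, which is why the matching send/recv
// loops in ops_mpi.cpp can rely on equal sizes on both ends of each transfer.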
+std::vector> getRandomMatrix(uint32_t row, uint32_t col) { + int32_t low = -200; + int32_t high = 200; + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> mtrx(row, std::vector(col)); + for (uint32_t i = 0; i < row; i++) { + for (uint32_t j = 0; j < col; j++) { + mtrx[i][j] = low + gen() % (high - low + 1); + } + } + return mtrx; +} + +void insertRefValue(std::vector>& mtrx, int32_t ref) { + std::random_device dev; + std::mt19937 gen(dev()); + + uint32_t ind; + uint32_t row = mtrx.size(); + uint32_t col = mtrx[0].size(); + + for (uint32_t j = 0; j < col; j++) { + ind = gen() % row; + mtrx[ind][j] = ref; + } +} + +void runTask(ppc::core::Task& task) { + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); +} + +void fillTaskData(std::shared_ptr& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) { + for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast(mtrxRow.data())); + taskData->inputs_count.emplace_back(row); + taskData->inputs_count.emplace_back(col); + taskData->outputs.emplace_back(reinterpret_cast(res.data())); + taskData->outputs_count.emplace_back(res.size()); +} + +TEST(kondratev_ya_max_col_matrix_mpi, test_pipeline_run) { + uint32_t row = 6000; + uint32_t col = 6000; + int32_t ref_val = INT_MAX; + + boost::mpi::communicator world; + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx; + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + fillTaskData(taskDataPar, row, col, mtrx, res); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + runTask(*testMpiTaskParallel); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} + +TEST(kondratev_ya_max_col_matrix_mpi_perf_test, test_task_run) { + uint32_t row = 6000; + uint32_t col = 6000; + int32_t ref_val = INT_MAX; + + boost::mpi::communicator world; + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx; + + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + fillTaskData(taskDataPar, row, col, mtrx, res); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + runTask(*testMpiTaskParallel); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..313d0e034af --- /dev/null +++ b/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp @@ -0,0 +1,165 @@ +// Copyright 
2023 Nesterov Alexander +#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + uint32_t row = taskData->inputs_count[0]; + uint32_t col = taskData->inputs_count[1]; + + std::vector tmp(row); + for (uint32_t i = 0; i < row; i++) { + tmp[i] = reinterpret_cast(taskData->inputs[i]); + } + + input_.resize(col, std::vector(row)); + for (uint32_t j = 0; j < col; j++) { + for (uint32_t i = 0; i < row; i++) { + input_[j][i] = tmp[i][j]; + } + } + res_.resize(col); + + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() && + !taskData->inputs.empty(); +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + for (uint32_t i = 0; i < input_.size(); i++) { + res_[i] = *std::max_element(input_[i].begin(), input_[i].end()); + } + + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + auto* output_matrix = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output_matrix); + + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + row_ = taskData->inputs_count[0]; + col_ = taskData->inputs_count[1]; + + std::vector tmp(row_); + for (uint32_t i = 0; i < row_; i++) { + tmp[i] = reinterpret_cast(taskData->inputs[i]); + } + + input_.resize(col_, std::vector(row_)); + for (uint32_t j = 0; j < col_; j++) { + for (uint32_t i = 0; i < row_; i++) { + input_[j][i] = tmp[i][j]; + } + } + res_.resize(col_); + } + + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() && + !taskData->inputs.empty(); + } + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + broadcast(world, row_, 0); + broadcast(world, col_, 0); + + step_ = col_ / world.size(); + remain_ = col_ % world.size(); + + uint32_t recvSize = 0; + + if (world.rank() == 0) { + uint32_t worldSize = world.size(); + uint32_t ind = step_; + if (remain_ > 0) ind++; + + for (uint32_t i = 1; i < worldSize; i++) { + recvSize = step_; + if (i < remain_) recvSize++; + + for (uint32_t j = 0; j < recvSize; j++) { + world.send(i, 0, input_[ind++]); + } + } + } + + recvSize = step_; + if (static_cast(world.rank()) < remain_) recvSize++; + local_input_.resize(recvSize, std::vector(row_)); + + if (world.rank() == 0) { + std::copy(input_.begin(), input_.begin() + recvSize, local_input_.begin()); + } else { + for (uint32_t i = 0; i < recvSize; i++) { + world.recv(0, 0, local_input_[i]); + } + } + + std::vector loc_max(local_input_.size()); + for (size_t i = 0; i < loc_max.size(); i++) { + loc_max[i] = *std::max_element(local_input_[i].begin(), local_input_[i].end()); + } + + if (world.rank() == 0) { + std::copy(loc_max.begin(), loc_max.end(), res_.begin()); + + std::vector sizes(world.size(), step_); + for (uint32_t i = 0; i < remain_; i++) sizes[i]++; + + uint32_t ind = sizes[0]; + for (int32_t i = 1; i < 
world.size(); i++) { + world.recv(i, 0, &res_[ind], sizes[i]); + ind += sizes[i]; + } + } else { + world.send(0, 0, loc_max.data(), loc_max.size()); + } + + return true; +} + +bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* output = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output); + } + + return true; +} diff --git a/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp b/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..c7897ca095f --- /dev/null +++ b/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp @@ -0,0 +1,107 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp" + +std::vector> getRandomMatrix(uint32_t row, uint32_t col) { + int32_t low = -200; + int32_t high = 200; + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> mtrx(row, std::vector(col)); + for (uint32_t i = 0; i < row; i++) { + for (uint32_t j = 0; j < col; j++) { + mtrx[i][j] = low + gen() % (high - low + 1); + } + } + return mtrx; +} + +void insertRefValue(std::vector>& mtrx, int32_t ref) { + std::random_device dev; + std::mt19937 gen(dev()); + + uint32_t ind; + uint32_t row = mtrx.size(); + uint32_t col = mtrx[0].size(); + + for (uint32_t j = 0; j < col; j++) { + ind = gen() % row; + mtrx[ind][j] = ref; + } +} + +void runTask(ppc::core::Task& task) { + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); +} + +void fillTaskData(std::shared_ptr& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) { + for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast(mtrxRow.data())); + taskData->inputs_count.emplace_back(row); + taskData->inputs_count.emplace_back(col); + taskData->outputs.emplace_back(reinterpret_cast(res.data())); + taskData->outputs_count.emplace_back(res.size()); +} + +TEST(kondratev_ya_max_col_matrix_seq, test_1) { + uint32_t row = 100; + uint32_t col = 100; + int32_t ref_val = INT_MAX; + + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + + auto taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, res); + + kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + runTask(testTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} + +TEST(kondratev_ya_max_col_matrix_seq, test_2) { + uint32_t row = 1000; + uint32_t col = 50; + int32_t ref_val = INT_MAX; + + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + + auto taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, res); + + kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + runTask(testTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} + +TEST(kondratev_ya_max_col_matrix_seq, test_3) { + uint32_t row = 500; + uint32_t col = 1000; + int32_t ref_val = INT_MAX; + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + + auto taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, res); + + 
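// [Editor's note, not part of the original patch.] The sequential task below
// transposes the matrix while reading it and then takes *std::max_element over
// each transposed row. An equivalent per-column check that skips the transpose
// (sketch; assumes <algorithm> is available):
auto column_max_sketch = [](const std::vector<std::vector<int32_t>>& m, uint32_t j) {
  int32_t best = m[0][j];
  for (const auto& row : m) best = std::max(best, row[j]);
  return best;
};
(void)column_max_sketch;  // illustration only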
kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + runTask(testTaskSequential); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} \ No newline at end of file diff --git a/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp b/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..e37ed3478b4 --- /dev/null +++ b/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kondratev_ya_max_col_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace kondratev_ya_max_col_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp b/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..da20f13454b --- /dev/null +++ b/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp @@ -0,0 +1,117 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp" + +std::vector> getRandomMatrix(uint32_t row, uint32_t col) { + int32_t low = -200; + int32_t high = 200; + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> mtrx(row, std::vector(col)); + for (uint32_t i = 0; i < row; i++) { + for (uint32_t j = 0; j < col; j++) { + mtrx[i][j] = low + gen() % (high - low + 1); + } + } + return mtrx; +} + +void insertRefValue(std::vector>& mtrx, int32_t ref) { + std::random_device dev; + std::mt19937 gen(dev()); + + uint32_t ind; + uint32_t row = mtrx.size(); + uint32_t col = mtrx[0].size(); + + for (uint32_t j = 0; j < col; j++) { + ind = gen() % row; + mtrx[ind][j] = ref; + } +} + +void fillTaskData(std::shared_ptr& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) { + for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast(mtrxRow.data())); + taskData->inputs_count.emplace_back(row); + taskData->inputs_count.emplace_back(col); + taskData->outputs.emplace_back(reinterpret_cast(res.data())); + taskData->outputs_count.emplace_back(res.size()); +} + +TEST(kondratev_ya_max_col_matrix_seq, test_pipeline_run) { + uint32_t row = 6000; + uint32_t col = 6000; + int32_t ref_val = INT_MAX; + + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + + auto taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, res); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto 
perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} + +TEST(kondratev_ya_max_col_matrix_seq, test_task_run) { + uint32_t row = 6000; + uint32_t col = 6000; + int32_t ref_val = INT_MAX; + + std::vector res(col); + std::vector ref(col, ref_val); + std::vector> mtrx = getRandomMatrix(row, col); + insertRefValue(mtrx, ref_val); + + auto taskDataSeq = std::make_shared(); + fillTaskData(taskDataSeq, row, col, mtrx, res); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); +} diff --git a/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp b/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..84103c88d91 --- /dev/null +++ b/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + uint32_t row = taskData->inputs_count[0]; + uint32_t col = taskData->inputs_count[1]; + + std::vector tmp(row); + for (uint32_t i = 0; i < row; i++) { + tmp[i] = reinterpret_cast(taskData->inputs[i]); + } + + input_.resize(col, std::vector(row)); + for (uint32_t j = 0; j < col; j++) { + for (uint32_t i = 0; i < row; i++) { + input_[j][i] = tmp[i][j]; + } + } + res_.resize(col); + + return true; +} + +bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() && + !taskData->inputs.empty(); +} + +bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (uint32_t i = 0; i < input_.size(); i++) { + res_[i] = *std::max_element(input_[i].begin(), input_[i].end()); + } + + return true; +} + +bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + auto* output_matrix = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output_matrix); + + return true; +} From 48b54e851839c26575790844e6d4c9e819e61de6 Mon Sep 17 00:00:00 2001 From: Anastasia Kalyakina <112873960+kalyakinaa@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:48:07 +0300 Subject: [PATCH 069/155] =?UTF-8?q?=D0=9A=D0=B0=D0=BB=D1=8F=D0=BA=D0=B8?= =?UTF-8?q?=D0=BD=D0=B0=20=D0=90=D0=BD=D0=B0=D1=81=D1=82=D0=B0=D1=81=D0=B8?= =?UTF-8?q?=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=202.=20=D0=92=D1=8B=D1=87?= 
=?UTF-8?q?=D0=B8=D1=81=D0=BB=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=81=D1=80=D0=B5?= =?UTF-8?q?=D0=B4=D0=BD=D0=B5=D0=B3=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D1=8F=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82?= =?UTF-8?q?=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0=20(#7?= =?UTF-8?q?7)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description of the sequential solution: - The algorithm walks over all vector elements sequentially, accumulating their sum - The resulting sum is divided by the number of elements Description of the parallel solution: - The input vector is padded with zeros according to the number of processes - The data is distributed as evenly as possible with the scatterv function - Each process sums the elements of the sub-vector passed to it - The partial sums are combined by addition with the reduce function - The final sum is divided by the original element count (the count before zero-padding) --- .../func_tests/main.cpp | 251 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 ++++ .../perf_tests/main.cpp | 103 +++++++ .../kalyakina_a_average_value/src/ops_mpi.cpp | 112 ++++++++ .../func_tests/main.cpp | 159 +++++++++ .../include/ops_seq.hpp | 26 ++ .../perf_tests/main.cpp | 104 ++++++++ .../kalyakina_a_average_value/src/ops_seq.cpp | 41 +++ 8 files changed, 843 insertions(+) create mode 100644 tasks/mpi/kalyakina_a_average_value/func_tests/main.cpp create mode 100644 tasks/mpi/kalyakina_a_average_value/include/ops_mpi.hpp create mode 100644 tasks/mpi/kalyakina_a_average_value/perf_tests/main.cpp create mode 100644 tasks/mpi/kalyakina_a_average_value/src/ops_mpi.cpp create mode 100644 tasks/seq/kalyakina_a_average_value/func_tests/main.cpp create mode 100644 tasks/seq/kalyakina_a_average_value/include/ops_seq.hpp create mode 100644 tasks/seq/kalyakina_a_average_value/perf_tests/main.cpp create mode 100644 tasks/seq/kalyakina_a_average_value/src/ops_seq.cpp diff --git a/tasks/mpi/kalyakina_a_average_value/func_tests/main.cpp b/tasks/mpi/kalyakina_a_average_value/func_tests/main.cpp new file mode 100644 index 00000000000..754e8957010 --- /dev/null +++ b/tasks/mpi/kalyakina_a_average_value/func_tests/main.cpp @@ -0,0 +1,251 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/kalyakina_a_average_value/include/ops_mpi.hpp" + +std::vector RandomVectorWithFixSum(int sum, const int& count) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector result_vector(count); + for (int i = 0; i < count - 1; i++) { + result_vector[i] = gen() % (std::min(sum, 255) - 1); + sum -= result_vector[i]; + } + result_vector[count - 1] = sum; + return result_vector; +} + +TEST(kalyakina_a_average_value_mpi, Test_Avg_10) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 10; + const int sum = 1000; + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel AvgMPITaskParallel(taskDataPar); + ASSERT_EQ(AvgMPITaskParallel.validation(), true); + AvgMPITaskParallel.pre_processing(); +
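// [Editor's illustrative sketch, not part of the original patch.] The parallel
// task exercised here splits the input with boost::mpi::scatterv; as in
// ops_mpi.cpp further down, the displacement of each chunk is just the running
// sum of the preceding per-rank counts:
auto displacements_sketch = [](const std::vector<int>& counts) {
  std::vector<int> displ(counts.size(), 0);
  for (std::size_t i = 1; i < counts.size(); i++) {
    displ[i] = displ[i - 1] + counts[i - 1];
  }
  return displ;
};
(void)displacements_sketch;  // e.g. counts {4, 3, 3} give displacements {0, 4, 7}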
AvgMPITaskParallel.run(); + AvgMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector out_seq(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskDataSeq->outputs_count.emplace_back(out_seq.size()); + + // Create Task + kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential AvgMpiTaskSequential(taskDataSeq); + ASSERT_EQ(AvgMpiTaskSequential.validation(), true); + AvgMpiTaskSequential.pre_processing(); + AvgMpiTaskSequential.run(); + AvgMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out_mpi[0], out_seq[0]); + } +} + +TEST(kalyakina_a_average_value_mpi, Test_Avg_20) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 20; + const int sum = 3500; + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel AvgMPITaskParallel(taskDataPar); + ASSERT_EQ(AvgMPITaskParallel.validation(), true); + AvgMPITaskParallel.pre_processing(); + AvgMPITaskParallel.run(); + AvgMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector out_seq(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskDataSeq->outputs_count.emplace_back(out_seq.size()); + + // Create Task + kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential AvgMpiTaskSequential(taskDataSeq); + ASSERT_EQ(AvgMpiTaskSequential.validation(), true); + AvgMpiTaskSequential.pre_processing(); + AvgMpiTaskSequential.run(); + AvgMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out_mpi[0], out_seq[0]); + } +} + +TEST(kalyakina_a_average_value_mpi, Test_Avg_50) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 50; + const int sum = 8000; + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel AvgMPITaskParallel(taskDataPar); + ASSERT_EQ(AvgMPITaskParallel.validation(), true); + AvgMPITaskParallel.pre_processing(); + AvgMPITaskParallel.run(); + AvgMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector out_seq(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + 
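// [Editor's note, not part of the original patch.] RandomVectorWithFixSum keeps
// an exact invariant: the last element absorbs whatever remains, so the
// generated elements always add up to `sum`. That invariant is what makes the
// exact ASSERT_DOUBLE_EQ comparisons in these tests safe. A sketch of the check:
auto sums_to_sketch = [](const std::vector<int>& v, long long expected) {
  long long total = 0;
  for (int x : v) total += x;
  return total == expected;
};
(void)sums_to_sketch;  // illustration only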
taskDataSeq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskDataSeq->outputs_count.emplace_back(out_seq.size()); + + // Create Task + kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential AvgMpiTaskSequential(taskDataSeq); + ASSERT_EQ(AvgMpiTaskSequential.validation(), true); + AvgMpiTaskSequential.pre_processing(); + AvgMpiTaskSequential.run(); + AvgMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out_mpi[0], out_seq[0]); + } +} + +TEST(kalyakina_a_average_value_mpi, Test_Avg_70) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 70; + const int sum = 10000; + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel AvgMPITaskParallel(taskDataPar); + ASSERT_EQ(AvgMPITaskParallel.validation(), true); + AvgMPITaskParallel.pre_processing(); + AvgMPITaskParallel.run(); + AvgMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector out_seq(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskDataSeq->outputs_count.emplace_back(out_seq.size()); + + // Create Task + kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential AvgMpiTaskSequential(taskDataSeq); + ASSERT_EQ(AvgMpiTaskSequential.validation(), true); + AvgMpiTaskSequential.pre_processing(); + AvgMpiTaskSequential.run(); + AvgMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out_mpi[0], out_seq[0]); + } +} + +TEST(kalyakina_a_average_value_mpi, Test_Avg_100) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count = 100; + const int sum = 20000; + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel AvgMPITaskParallel(taskDataPar); + ASSERT_EQ(AvgMPITaskParallel.validation(), true); + AvgMPITaskParallel.pre_processing(); + AvgMPITaskParallel.run(); + AvgMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector out_seq(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskDataSeq->outputs_count.emplace_back(out_seq.size()); + + // Create Task + kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential AvgMpiTaskSequential(taskDataSeq); + ASSERT_EQ(AvgMpiTaskSequential.validation(), true); + AvgMpiTaskSequential.pre_processing(); + AvgMpiTaskSequential.run(); + 
AvgMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out_mpi[0], out_seq[0]); + } +} diff --git a/tasks/mpi/kalyakina_a_average_value/include/ops_mpi.hpp b/tasks/mpi/kalyakina_a_average_value/include/ops_mpi.hpp new file mode 100644 index 00000000000..70c718cc013 --- /dev/null +++ b/tasks/mpi/kalyakina_a_average_value/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kalyakina_a_average_value_mpi { + +class FindingAverageMPITaskSequential : public ppc::core::Task { + public: + explicit FindingAverageMPITaskSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_vector; + double average_value{}; +}; + +class FindingAverageMPITaskParallel : public ppc::core::Task { + public: + explicit FindingAverageMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_vector; + std::vector local_input_vector; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace kalyakina_a_average_value_mpi \ No newline at end of file diff --git a/tasks/mpi/kalyakina_a_average_value/perf_tests/main.cpp b/tasks/mpi/kalyakina_a_average_value/perf_tests/main.cpp new file mode 100644 index 00000000000..f87e315d1ea --- /dev/null +++ b/tasks/mpi/kalyakina_a_average_value/perf_tests/main.cpp @@ -0,0 +1,103 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kalyakina_a_average_value/include/ops_mpi.hpp" + +std::vector RandomVectorWithFixSum(int sum, const int& count) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector result_vector(count); + for (int i = 0; i < count - 1; i++) { + result_vector[i] = gen() % (std::min(sum, 255) - 1); + sum -= result_vector[i]; + } + result_vector[count - 1] = sum; + return result_vector; +} + +TEST(kalyakina_a_average_value_mpi, Avg_pipeline_run) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count = 120; + int sum = 25000; + if (world.rank() == 0) { + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + auto AvgMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(AvgMpiTaskParallel->validation(), true); + AvgMpiTaskParallel->pre_processing(); + AvgMpiTaskParallel->run(); + AvgMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(AvgMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + 
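// [Editor's note, not part of the original patch.] Only rank 0 owns the output
// buffer: TaskData is populated inside the world.rank() == 0 branch above, and
// the reduce inside the task delivers the sum to the root alone, so both the
// perf statistics and the final assertion below are gated to rank 0.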
ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(out_mpi[0], (double)sum / count); + } +} + +TEST(kalyakina_a_average_value_mpi, Avg_task_run) { + boost::mpi::communicator world; + std::vector in{}; + std::vector out_mpi(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count = 120; + int sum = 25000; + if (world.rank() == 0) { + in = RandomVectorWithFixSum(sum, count); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out_mpi.data())); + taskDataPar->outputs_count.emplace_back(out_mpi.size()); + } + + auto AvgMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(AvgMpiTaskParallel->validation(), true); + AvgMpiTaskParallel->pre_processing(); + AvgMpiTaskParallel->run(); + AvgMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(AvgMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(out_mpi[0], (double)sum / count); + } +} diff --git a/tasks/mpi/kalyakina_a_average_value/src/ops_mpi.cpp b/tasks/mpi/kalyakina_a_average_value/src/ops_mpi.cpp new file mode 100644 index 00000000000..0a13c34b4fa --- /dev/null +++ b/tasks/mpi/kalyakina_a_average_value/src/ops_mpi.cpp @@ -0,0 +1,112 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/kalyakina_a_average_value/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential::pre_processing() { + internal_order_test(); + + // Init value for input and output + input_vector = std::vector(taskData->inputs_count[0]); + int* it = reinterpret_cast(taskData->inputs[0]); + std::copy(it, it + taskData->inputs_count[0], input_vector.begin()); + + // Init value for output + average_value = 0.0; + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential::validation() { + internal_order_test(); + + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential::run() { + internal_order_test(); + for (unsigned int i = 0; i < input_vector.size(); i++) { + average_value += input_vector[i]; + } + average_value /= input_vector.size(); + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = average_value; + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + // Init vectors + input_vector = std::vector(taskData->inputs_count[0]); + int* it = reinterpret_cast(taskData->inputs[0]); + std::copy(it, it + taskData->inputs_count[0], input_vector.begin()); + } + + // Init value for output + if (world.rank() == 0) { + result = 0; + } + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel::validation() { + 
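// [Editor's note, not part of the original patch.] Validation is deliberately
// asymmetric: only rank 0 holds a populated TaskData, so non-root ranks return
// true unconditionally and the real output-count check runs on the root only.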
internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel::run() { + internal_order_test(); + unsigned int part = 0; + unsigned int reminder = 0; + if (world.rank() == 0) { + part = taskData->inputs_count[0] / world.size(); + reminder = taskData->inputs_count[0] % world.size(); + } + boost::mpi::broadcast(world, part, 0); + boost::mpi::broadcast(world, reminder, 0); + std::vector distr(world.size(), part); + std::vector displ(world.size()); + for (unsigned int i = 0; i < static_cast(world.size()); i++) { + if (reminder > 0) { + distr[i]++; + reminder--; + } + if (i == 0) { + displ[i] = 0; + } else { + displ[i] = displ[i - 1] + distr[i - 1]; + } + } + local_input_vector = std::vector(distr[world.rank()]); + boost::mpi::scatterv(world, input_vector.data(), distr, displ, local_input_vector.data(), distr[world.rank()], 0); + + int local_res = 0; + for (unsigned int i = 0; i < local_input_vector.size(); i++) { + local_res += local_input_vector[i]; + } + boost::mpi::reduce(world, local_res, result, std::plus<>(), 0); + return true; +} + +bool kalyakina_a_average_value_mpi::FindingAverageMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = (double)result / taskData->inputs_count[0]; + } + return true; +} diff --git a/tasks/seq/kalyakina_a_average_value/func_tests/main.cpp b/tasks/seq/kalyakina_a_average_value/func_tests/main.cpp new file mode 100644 index 00000000000..4d498136067 --- /dev/null +++ b/tasks/seq/kalyakina_a_average_value/func_tests/main.cpp @@ -0,0 +1,159 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/kalyakina_a_average_value/include/ops_seq.hpp" + +std::vector RandomVectorWithFixSum(int sum, const int &count) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector result_vector(count); + for (int i = 0; i < count - 1; i++) { + result_vector[i] = gen() % (std::min(sum, 255) - 1); + sum -= result_vector[i]; + } + result_vector[count - 1] = sum; + return result_vector; +} + +TEST(kalyakina_a_average_value_seq, Test_Avg_10) { + const int count = 10; + const int sum = 1000; + const double expected_value = (double)(sum) / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential AverageValueTaskSequential(taskDataSeq); + + ASSERT_EQ(AverageValueTaskSequential.validation(), true); + AverageValueTaskSequential.pre_processing(); + AverageValueTaskSequential.run(); + AverageValueTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} + +TEST(kalyakina_a_average_value_seq, Test_Avg_20) { + const int count = 20; + const int sum = 3500; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential AverageValueTaskSequential(taskDataSeq); + + ASSERT_EQ(AverageValueTaskSequential.validation(), true); + AverageValueTaskSequential.pre_processing(); + AverageValueTaskSequential.run(); + AverageValueTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} + +TEST(kalyakina_a_average_value_seq, Test_Avg_50) { + const int count = 50; + const int sum = 8000; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential AverageValueTaskSequential(taskDataSeq); + + ASSERT_EQ(AverageValueTaskSequential.validation(), true); + AverageValueTaskSequential.pre_processing(); + AverageValueTaskSequential.run(); + AverageValueTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} + +TEST(kalyakina_a_average_value_seq, Test_Avg_70) { + const int count = 70; + const int sum = 10000; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential AverageValueTaskSequential(taskDataSeq); + + ASSERT_EQ(AverageValueTaskSequential.validation(), true); + AverageValueTaskSequential.pre_processing(); + AverageValueTaskSequential.run(); + AverageValueTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} + +TEST(kalyakina_a_average_value_seq, Test_Avg_100) { + const int count = 100; + const int sum = 20000; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential AverageValueTaskSequential(taskDataSeq); + + ASSERT_EQ(AverageValueTaskSequential.validation(), true); + AverageValueTaskSequential.pre_processing(); + AverageValueTaskSequential.run(); + AverageValueTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} diff --git a/tasks/seq/kalyakina_a_average_value/include/ops_seq.hpp 
b/tasks/seq/kalyakina_a_average_value/include/ops_seq.hpp new file mode 100644 index 00000000000..85411bc196d --- /dev/null +++ b/tasks/seq/kalyakina_a_average_value/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kalyakina_a_average_value_seq { + +class FindingAverageOfVectorElementsTaskSequential : public ppc::core::Task { + public: + explicit FindingAverageOfVectorElementsTaskSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_vector; + double average_value{}; +}; + +} // namespace kalyakina_a_average_value_seq \ No newline at end of file diff --git a/tasks/seq/kalyakina_a_average_value/perf_tests/main.cpp b/tasks/seq/kalyakina_a_average_value/perf_tests/main.cpp new file mode 100644 index 00000000000..0961a177d60 --- /dev/null +++ b/tasks/seq/kalyakina_a_average_value/perf_tests/main.cpp @@ -0,0 +1,104 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kalyakina_a_average_value/include/ops_seq.hpp" + +std::vector RandomVectorWithFixSum(int sum, const int &count) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector result_vector(count); + for (int i = 0; i < count - 1; i++) { + result_vector[i] = gen() % (std::min(sum, 255) - 1); + sum -= result_vector[i]; + } + result_vector[count - 1] = sum; + return result_vector; +} + +TEST(kalyakina_a_average_value_seq, test_pipeline_run) { + const int count = 100; + const int sum = 20000; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto AverageValueTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(AverageValueTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} + +TEST(kalyakina_a_average_value_seq, test_task_run) { + const int count = 100; + const int sum = 20000; + const double expected_value = (double)sum / count; + + // Create data + std::vector in = RandomVectorWithFixSum(sum, count); + std::vector out(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + 
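// [Editor's note, not part of the original patch.] This block follows the
// TaskData convention used throughout the repository: inputs/outputs store
// type-erased byte pointers into caller-owned buffers, inputs_count and
// outputs_count carry the element counts, and each task casts the pointers
// back to the concrete types in pre_processing()/post_processing(). The
// buffers therefore must outlive the whole task run.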
taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto AverageValueTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(AverageValueTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_DOUBLE_EQ(out[0], expected_value); +} diff --git a/tasks/seq/kalyakina_a_average_value/src/ops_seq.cpp b/tasks/seq/kalyakina_a_average_value/src/ops_seq.cpp new file mode 100644 index 00000000000..75cb241de61 --- /dev/null +++ b/tasks/seq/kalyakina_a_average_value/src/ops_seq.cpp @@ -0,0 +1,41 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kalyakina_a_average_value/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential::pre_processing() { + internal_order_test(); + + // Init value for input and output + input_vector = std::vector(taskData->inputs_count[0]); + int* it = reinterpret_cast(taskData->inputs[0]); + std::copy(it, it + taskData->inputs_count[0], input_vector.begin()); + average_value = 0.0; + return true; +} + +bool kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential::validation() { + internal_order_test(); + + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential::run() { + internal_order_test(); + for (unsigned int i = 0; i < input_vector.size(); i++) { + average_value += input_vector[i]; + } + average_value /= input_vector.size(); + return true; +} + +bool kalyakina_a_average_value_seq::FindingAverageOfVectorElementsTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = average_value; + return true; +} From da7a3ebb3364914ba1bf55638e30f2cbb0970baa Mon Sep 17 00:00:00 2001 From: Roman Nikolaev <108869243+1alron@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:48:57 +0300 Subject: [PATCH 070/155] =?UTF-8?q?=D0=9D=D0=B8=D0=BA=D0=BE=D0=BB=D0=B0?= =?UTF-8?q?=D0=B5=D0=B2=20=D0=A0=D0=BE=D0=BC=D0=B0=D0=BD.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2020.=20=D0=98=D0=BD=D1=82=D0=B5=D0=B3=D1=80=D0=B8?= =?UTF-8?q?=D1=80=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20-=20=D0=BC=D0=B5?= =?UTF-8?q?=D1=82=D0=BE=D0=B4=20=D1=82=D1=80=D0=B0=D0=BF=D0=B5=D1=86=D0=B8?= =?UTF-8?q?=D0=B9.=20(#80)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential version: the integration range on the given interval [a, b] is split into n segments, each of which forms a trapezoid. Their areas are computed one after another and then summed. The resulting number is an approximate value of the computed integral.
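An illustrative sketch of the rule just described (an editor's addition, not part of the original patch; the template parameter F stands in for the integrand, whose concrete type is not shown in this excerpt):

template <typename F>
double trapezoid_sketch(F f, double a, double b, int n) {
  const double h = (b - a) / n;      // width of each of the n segments
  double sum = (f(a) + f(b)) / 2.0;  // endpoints enter with half weight
  for (int i = 1; i < n; i++) {
    sum += f(a + i * h);             // interior points are shared by two trapezoids
  }
  return sum * h;                    // total area of the n trapezoids
}

For the linear integrand f(x) = 2x + 8 on [0, 3] used by the first functional test below, this returns exactly 33 for any n >= 1, since the trapezoidal rule is exact on linear functions.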
Parallel version: the parallel version differs in that each process computes its own local value, after which the local values are summed to obtain the overall result. This is implemented with the boost::mpi::broadcast function (the rank-0 process sends the values from the params array to the other processes in the group so they can carry out their local computations, after which the boost::mpi::reduce function gathers the local result of every process, sums them, and stores the final sum in the res_ variable of the rank-0 process). --------- Co-authored-by: Michael K. <130953568+kmichaelk@users.noreply.github.com> --- .../func_tests/main.cpp | 426 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 ++ .../perf_tests/main.cpp | 92 ++++ .../src/ops_mpi.cpp | 113 +++++ .../func_tests/main.cpp | 216 +++++++++ .../include/ops_seq.hpp | 25 + .../perf_tests/main.cpp | 81 ++++ .../src/ops_seq.cpp | 49 ++ 8 files changed, 1050 insertions(+) create mode 100644 tasks/mpi/nikolaev_r_trapezoidal_integral/func_tests/main.cpp create mode 100644 tasks/mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp create mode 100644 tasks/mpi/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp create mode 100644 tasks/mpi/nikolaev_r_trapezoidal_integral/src/ops_mpi.cpp create mode 100644 tasks/seq/nikolaev_r_trapezoidal_integral/func_tests/main.cpp create mode 100644 tasks/seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp create mode 100644 tasks/seq/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp create mode 100644 tasks/seq/nikolaev_r_trapezoidal_integral/src/ops_seq.cpp diff --git a/tasks/mpi/nikolaev_r_trapezoidal_integral/func_tests/main.cpp b/tasks/mpi/nikolaev_r_trapezoidal_integral/func_tests/main.cpp new file mode 100644 index 00000000000..1a31c72baf2 --- /dev/null +++ b/tasks/mpi/nikolaev_r_trapezoidal_integral/func_tests/main.cpp @@ -0,0 +1,426 @@ +#define _USE_MATH_DEFINES + +#include + +#include +#include +#include +#include + +#include "mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp" + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_linear_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 3.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return 2 * x + 8; }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); +
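// [Editor's note, not part of the original patch.] The integration bounds and
// the subdivision count travel through TaskData as three separate one-element
// inputs (&a, &b, &n); presumably the task unpacks them by input index in
// pre_processing(), though that source file lies outside the excerpt shown here.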
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_squared_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -1.0; + double b = 4.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return x * x + 2 * x + 1; }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_trippled_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 10.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::pow(x, 3) + 2 * x * x + 8; }; + testMpiTaskParallel.set_function(f); + 
ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_cosine_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = M_PI; + double b = M_PI * 2; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::cos(x); }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_sine_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = M_PI; + double b = M_PI * 4; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + 
taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::sin(x); }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_pow_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -1.0; + double b = 6.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::pow(3, x); }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + 
ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_exp_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -2.0; + double b = 8.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::exp(x * 2); }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.set_function(f); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(global_result[0], reference_result[0], 0.01); + } +} + +TEST(nikolaev_r_trapezoidal_integral_mpi, test_int_mixed_func) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -2.0; + double b = 10.0; + int n = 100000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel testMpiTaskParallel(taskDataPar); + auto f = [](double x) { return std::pow(x, 4) - std::exp(x) + std::pow(4, x); }; + testMpiTaskParallel.set_function(f); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&a));
+    taskDataSeq->inputs_count.emplace_back(1);
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&b));
+    taskDataSeq->inputs_count.emplace_back(1);
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n));
+    taskDataSeq->inputs_count.emplace_back(1);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_result.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_result.size());
+
+    nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential testMpiTaskSequential(taskDataSeq);
+    testMpiTaskSequential.set_function(f);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_NEAR(global_result[0], reference_result[0], 0.01);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp b/tasks/mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp
new file mode 100644
index 00000000000..501a508c1e1
--- /dev/null
+++ b/tasks/mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp
@@ -0,0 +1,48 @@
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <functional>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "core/task/include/task.hpp"
+
+namespace nikolaev_r_trapezoidal_integral_mpi {
+
+class TrapezoidalIntegralSequential : public ppc::core::Task {
+ public:
+  explicit TrapezoidalIntegralSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+  void set_function(const std::function<double(double)>& f);
+
+ private:
+  double a_{}, b_{}, n_{}, res_{};
+  std::function<double(double)> function_;
+
+  static double integrate_function(double a, double b, int n, const std::function<double(double)>& f);
+};
+
+class TrapezoidalIntegralParallel : public ppc::core::Task {
+ public:
+  explicit TrapezoidalIntegralParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+  void set_function(const std::function<double(double)>& f);
+
+ private:
+  double a_{}, b_{}, n_{}, res_{};
+  std::function<double(double)> function_;
+  boost::mpi::communicator world;
+
+  double integrate_function(double a, double b, int n, const std::function<double(double)>& f);
+};
+}  // namespace nikolaev_r_trapezoidal_integral_mpi
\ No newline at end of file
diff --git a/tasks/mpi/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp b/tasks/mpi/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp
new file mode 100644
index 00000000000..a1894ad6be7
--- /dev/null
+++ b/tasks/mpi/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp
@@ -0,0 +1,92 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <cmath>
+#include <memory>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp"
+
+TEST(nikolaev_r_trapezoidal_integral_mpi, test_pipeline_run) {
+  boost::mpi::communicator world;
+  double a = -2.0;
+  double b = 10.0;
+  int n = 100000;
+  double result = 0.0;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&a));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&b));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&result));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel>(taskDataPar);
+  auto f = [](double x) { return std::pow(x, 3) - std::pow(3, x) + std::exp(x); };
+  testMpiTaskParallel->set_function(f);
+
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    double accurate_result = -29226.28;
+    ASSERT_NEAR(accurate_result, result, 0.01);
+  }
+}
+
+TEST(nikolaev_r_trapezoidal_integral_mpi, test_task_run) {
+  boost::mpi::communicator world;
+  double a = -2.0;
+  double b = 10.0;
+  int n = 100000;
+  double result = 0.0;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&a));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&b));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&result));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel>(taskDataPar);
+  auto f = [](double x) { return std::pow(x, 3) - std::pow(3, x) + std::exp(x); };
+  testMpiTaskParallel->set_function(f);
+
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    double accurate_result = -29226.28;
+    ASSERT_NEAR(accurate_result, result, 0.01);
+  }
+}
diff --git a/tasks/mpi/nikolaev_r_trapezoidal_integral/src/ops_mpi.cpp b/tasks/mpi/nikolaev_r_trapezoidal_integral/src/ops_mpi.cpp
new file mode 100644
index 00000000000..ae0427d19fb
--- /dev/null
+++ b/tasks/mpi/nikolaev_r_trapezoidal_integral/src/ops_mpi.cpp
@@ -0,0 +1,113 @@
+#include "mpi/nikolaev_r_trapezoidal_integral/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <string>
+#include <vector>
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::pre_processing() {
+  internal_order_test();
+  a_ = *reinterpret_cast<double *>(taskData->inputs[0]);
+  b_ = *reinterpret_cast<double *>(taskData->inputs[1]);
+  n_ = *reinterpret_cast<int *>(taskData->inputs[2]);
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::validation() {
+  internal_order_test();
+  return taskData->outputs_count[0] == 1;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::run() {
+  internal_order_test();
+  res_ = integrate_function(a_, b_, n_, function_);
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<double *>(taskData->outputs[0]) = res_;
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    a_ = *reinterpret_cast<double *>(taskData->inputs[0]);
+    b_ = *reinterpret_cast<double *>(taskData->inputs[1]);
+    n_ = *reinterpret_cast<int *>(taskData->inputs[2]);
+  }
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::run() {
+  internal_order_test();
+  double params[3] = {0.0};
+  if (world.rank() == 0) {
+    params[0] = a_;
+    params[1] = b_;
+    params[2] = static_cast<double>(n_);
+  }
+  // Rank 0 shares a, b and n with all processes in the group.
+  boost::mpi::broadcast(world, params, std::size(params), 0);
+  double local_res = integrate_function(params[0], params[1], static_cast<int>(params[2]), function_);
+  // Sum the per-process partial integrals into res_ on rank 0.
+  boost::mpi::reduce(world, local_res, res_, std::plus<double>(), 0);
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<double *>(taskData->outputs[0]) = res_;
+  }
+  return true;
+}
+
+void nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::set_function(
+    const std::function<double(double)>& f) {
+  function_ = f;
+}
+
+void nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::set_function(
+    const std::function<double(double)>& f) {
+  function_ = f;
+}
+
+double nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralSequential::integrate_function(
+    double a, double b, int n, const std::function<double(double)>& f) {
+  const double width = (b - a) / n;
+  double result = 0.0;
+  for (int step = 0; step < n; step++) {
+    const double x1 = a + step * width;
+    const double x2 = a + (step + 1) * width;
+
+    result += 0.5 * (x2 - x1) * (f(x1) + f(x2));
+  }
+
+  return result;
+}
+
+double nikolaev_r_trapezoidal_integral_mpi::TrapezoidalIntegralParallel::integrate_function(
+    double a, double b, int n, const std::function<double(double)>& f) {
+  int rank = world.rank();
+  int size = world.size();
+
+  // Strided decomposition: rank r handles segments r, r + size, r + 2 * size, ...
+  const double width = (b - a) / n;
+  double result = 0.0;
+  for (int step = rank; step < n; step += size) {
+    const double x1 = a + step * width;
+    const double x2 = a + (step + 1) * width;
+
+    result += 0.5 * (x2 - x1) * (f(x1) + f(x2));
+  }
+
+  return result;
+}
\ No newline at end of file
diff --git a/tasks/seq/nikolaev_r_trapezoidal_integral/func_tests/main.cpp b/tasks/seq/nikolaev_r_trapezoidal_integral/func_tests/main.cpp
new file mode 100644
index 00000000000..2c43545d556
--- /dev/null
+++ b/tasks/seq/nikolaev_r_trapezoidal_integral/func_tests/main.cpp
@@ -0,0 +1,216 @@
+#define _USE_MATH_DEFINES
+
+#include <gtest/gtest.h>
+
+#include <cmath>
+#include <vector>
+
+#include "seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp"
+
+TEST(nikolaev_r_trapezoidal_integral_seq, test_int_linear_func) {
+  const double a = -1.0;
+  const double b = 1.0;
+  const int n = 500;
+  const double expected = 6.0;
+
+  std::vector<double> in = {a, b, static_cast<double>(n)};
+  std::vector<double> out(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential =
+      std::make_shared<nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential>(taskDataSeq);
+  auto f = [](double x) { return 4 * x + 3; };
+  testTaskSequential->set_function(f);
+  ASSERT_EQ(testTaskSequential->validation(), true);
+  testTaskSequential->pre_processing();
+  testTaskSequential->run();
+  testTaskSequential->post_processing();
+  ASSERT_NEAR(expected, out[0], 0.01);
+}
+
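+// Note (illustrative, not part of the original change): the `expected`
+// constants in these tests are closed-form values of the integrals, rounded to
+// two decimals. For the squared test below, for example:
+//   integral over [0, 2] of (5x^2 + 8) dx = (5/3)x^3 + 8x evaluated at 2
+//                                         = 40/3 + 16 = 29.33...
+// so ASSERT_NEAR with a 0.01 tolerance passes once n = 500 trapezoids make the
+// discretization error negligible.
+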
+TEST(nikolaev_r_trapezoidal_integral_seq, test_int_squared_func) { + const double a = 0.0; + const double b = 2.0; + const int n = 500; + const double expected = 29.33; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return 5 * x * x + 8; }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_4th_degree_pol_func) { + const double a = -2.0; + const double b = 2.0; + const int n = 500; + const double expected = -5.87; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return std::pow(x, 4) + 3 * std::pow(x, 3) - 5 * x * x + 2; }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_exp_func) { + const double a = 0.0; + const double b = 1.0; + const int n = 500; + const double expected = 6.36; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return std::exp(3 * x); }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_pow_func) { + const double a = 0.0; + const double b = 3.0; + const int n = 500; + const double expected = 63.44; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return std::pow(4, x) + 6; }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + 
testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_cosine_func) { + const double a = M_PI / 2; + const double b = M_PI * 2; + const int n = 500; + const double expected = -1.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return std::cos(x); }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_sin_func) { + const double a = 0.0; + const double b = M_PI; + const int n = 500; + const double expected = 2.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return std::sin(x); }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_int_mixed_func) { + const double a = 1; + const double b = 3; + const int n = 500; + const double expected = 83.19; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + auto f = [](double x) { return 3 * x * x + std::pow(5, x) - std::exp(x); }; + testTaskSequential->set_function(f); + ASSERT_EQ(testTaskSequential->validation(), true); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + ASSERT_NEAR(expected, out[0], 0.01); +} \ No newline at end of file diff --git a/tasks/seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp b/tasks/seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp new file mode 100644 index 00000000000..247d6c906fd --- /dev/null +++ b/tasks/seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace nikolaev_r_trapezoidal_integral_seq { + +class TrapezoidalIntegralSequential : public ppc::core::Task { + public: + explicit TrapezoidalIntegralSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + void 
set_function(std::function f); + + private: + double a_{}, b_{}, n_{}, res_{}; + std::function function_; + static double integrate_function(double a, double b, int n, const std::function& f); +}; +} // namespace nikolaev_r_trapezoidal_integral_seq diff --git a/tasks/seq/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp b/tasks/seq/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp new file mode 100644 index 00000000000..18d3e9b38bd --- /dev/null +++ b/tasks/seq/nikolaev_r_trapezoidal_integral/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp" + +TEST(nikolaev_r_trapezoidal_integral_seq, test_pipeline_run) { + const double a = -1.0; + const double b = 2.0; + const double expected = 14.05; + const int n = 100000; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + auto f = [](double x) { return std::pow(2, x) + 3 * x * x; }; + testTaskSequential->set_function(f); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_NEAR(expected, out[0], 0.01); +} + +TEST(nikolaev_r_trapezoidal_integral_seq, test_task_run) { + const double a = -1.0; + const double b = 2.0; + const double expected = 14.05; + const int n = 100000; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + auto f = [](double x) { return std::pow(2, x) + 3 * x * x; }; + testTaskSequential->set_function(f); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_NEAR(expected, out[0], 0.01); +} diff --git a/tasks/seq/nikolaev_r_trapezoidal_integral/src/ops_seq.cpp b/tasks/seq/nikolaev_r_trapezoidal_integral/src/ops_seq.cpp new file mode 100644 index 00000000000..6032c0e9463 --- /dev/null +++ 
b/tasks/seq/nikolaev_r_trapezoidal_integral/src/ops_seq.cpp
@@ -0,0 +1,49 @@
+#include "seq/nikolaev_r_trapezoidal_integral/include/ops_seq.hpp"
+
+#include <utility>
+
+bool nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::pre_processing() {
+  internal_order_test();
+  auto* inputs = reinterpret_cast<double*>(taskData->inputs[0]);
+  a_ = inputs[0];
+  b_ = inputs[1];
+  n_ = static_cast<int>(inputs[2]);
+  res_ = 0.0;
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1;
+}
+
+bool nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::run() {
+  internal_order_test();
+  res_ = integrate_function(a_, b_, n_, function_);
+  return true;
+}
+
+bool nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<double*>(taskData->outputs[0])[0] = res_;
+  return true;
+}
+
+void nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::set_function(std::function<double(double)> f) {
+  function_ = std::move(f);
+}
+
+double nikolaev_r_trapezoidal_integral_seq::TrapezoidalIntegralSequential::integrate_function(
+    double a, double b, int n, const std::function<double(double)>& f) {
+  const double width = (b - a) / n;
+
+  double result = 0.0;
+  for (int step = 0; step < n; step++) {
+    const double x1 = a + step * width;
+    const double x2 = a + (step + 1) * width;
+
+    result += 0.5 * (x2 - x1) * (f(x1) + f(x2));
+  }
+
+  return result;
+}
\ No newline at end of file

From 7eb30f067e9704890bf4fb26e68683f9ed0dde47 Mon Sep 17 00:00:00 2001
From: TayaGordeeva <121258487+TayaGordeeva@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:49:54 +0300
Subject: [PATCH 071/155] =?UTF-8?q?=D0=93=D0=BE=D1=80=D0=B4=D0=B5=D0=B5?=
 =?UTF-8?q?=D0=B2=D0=B0=20=D0=A2=D0=B0=D0=B8=D1=81=D0=B8=D1=8F.=20=D0=97?=
 =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?=
 =?UTF-8?q?=D0=B0=D0=BD=D1=82=2016.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81=D0=B8=D0=BC?=
 =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE?=
 =?UTF-8?q?=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8?=
 =?UTF-8?q?=D1=86=D1=8B=20(#83)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Program description.**
The program computes the maximum value in each column of a matrix. A minimal
illustrative sketch of the column scan is given below, followed by the details
of both versions.
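For illustration only (this standalone snippet is not part of the change;
column_max is a placeholder name), a minimal sketch of the per-column scan that
both versions below build on:

#include <cstddef>
#include <vector>

// One maximum per column: start from the first row's element and keep the
// largest value seen while walking down the remaining rows.
std::vector<int> column_max(const std::vector<std::vector<int>>& m) {
  std::vector<int> res(m[0].size());
  for (std::size_t j = 0; j < m[0].size(); j++) {
    int max_el = m[0][j];
    for (std::size_t i = 1; i < m.size(); i++) {
      if (m[i][j] > max_el) max_el = m[i][j];
    }
    res[j] = max_el;
  }
  return res;
}

In the MPI version the same scan runs on each process's block of rows, and the
per-column partial maxima are merged on the root process.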
_-Sequential task:_
- initially the maximum equals the first element of the current column; the current maximum is then compared with every element of that column
- if an element's value is larger, it becomes the new maximum
- result: a vector of the maximum values of each column

_-MPI task:_
- the matrix rows are divided among the processes
- each process finds the maximum of every column within its portion of the data
- the per-process results are sent to the root process and merged
- result: a vector of the maximum values of each column

---------

Co-authored-by: TayaGordeeva
---
 .../func_tests/main.cpp                       | 210 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  47 ++++
 .../perf_tests/main.cpp                       |  81 +++++++
 .../src/ops_mpi.cpp                           | 186 ++++++++++++++++
 .../func_tests/main.cpp                       | 149 +++++++++++++
 .../include/ops_seq.hpp                       |  25 +++
 .../perf_tests/main.cpp                       |  79 +++++++
 .../src/ops_seq.cpp                           |  83 +++++++
 8 files changed, 860 insertions(+)
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..e170a88d7f8
--- /dev/null
+++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
@@ -0,0 +1,210 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <climits>
+
+#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp"
+
+TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyInput) {
+  boost::mpi::communicator world;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+  }
+}
+
+TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyOutput) {
+  boost::mpi::communicator world;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs_count.push_back(5);
+    taskDataPar->inputs_count.push_back(5);
+    taskDataPar->inputs.push_back(reinterpret_cast<uint8_t *>(new int[25]));
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+
+    delete[] reinterpret_cast<int *>(taskDataPar->inputs[0]);
+  }
+}
+
+TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_columns_with_random) {
+  boost::mpi::communicator world;
+
+  const int rows = 500;
+  const int cols = 500;
+  std::vector<std::vector<int>> global_matr;
+  std::vector<int> global_max(cols, INT_MIN);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols);
+    for (unsigned int i = 0; i < global_matr.size(); i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matr[i].data()));
+    }
+    taskDataPar->inputs_count = {rows, cols};
+
taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(global_max, max_example); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_1000_columns_with_random) { + boost::mpi::communicator world; + + const int rows = 500; + const int cols = 1000; + std::vector> global_matr; + std::vector global_max(cols, INT_MIN); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < cols; i++) { + ASSERT_EQ(global_max[i], max_example[i]); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_1000_3000_columns_with_random) { + boost::mpi::communicator world; + + const int rows = 1000; + const int cols = 3000; + std::vector> global_matr; + std::vector global_max(cols, INT_MIN); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + 
taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < cols; i++) { + ASSERT_EQ(global_max[i], max_example[i]); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_size_of_input) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(2); + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs.push_back(reinterpret_cast(new int[6])); + taskDataPar->outputs_count.push_back(2); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_of_output) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(10); + taskDataPar->inputs_count.push_back(15); + taskDataPar->inputs.push_back(reinterpret_cast(new int[150])); + taskDataPar->outputs_count.push_back(2); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..43906c64bf9 --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gordeva_t_max_val_of_column_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::vector gen_rand_vec(int s, int low = 0, int upp = 50); + static std::vector> gen_rand_matr(int rows, int cols); + + private: + std::vector> input_; + std::vector res; +}; + +class TestMPITaskParallel : public 
ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_, local_input_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace gordeva_t_max_val_of_column_matrix_mpi diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b3aa5af7edb --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_matr; + std::vector max_s; + + std::shared_ptr taskDataPar = std::make_shared(); + int rows = 5000; + int cols = 5000; + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); + max_s.resize(cols, INT_MIN); + for (auto& i : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); + } + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); + taskDataPar->outputs_count.emplace_back(max_s.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < max_s.size(); ++j) { + ASSERT_EQ(max_s[j], 200); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + + std::vector> global_matr; + std::vector max_s; + std::shared_ptr taskDataPar = std::make_shared(); + int rows = 7000; + int cols = 7000; + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); + max_s.resize(cols, INT_MIN); + + for (auto& i : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); + } + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); + taskDataPar->outputs_count.emplace_back(max_s.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < max_s.size(); ++j) { + ASSERT_EQ(max_s[j], 200); + } + } +} diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..34be2cf970e --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp @@ -0,0 +1,186 @@ +#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool 
gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_matr = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; + } + + res.resize(cols); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + // int cols = input_[0].size(); + // int rows = input_.size(); + for (size_t i = 0; i < input_[0].size(); i++) { + int max_el = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) + if (input_[j][i] > max_el) max_el = input_[j][i]; + + res[i] = max_el; + } + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + int* output_matr = reinterpret_cast(taskData->outputs[0]); + + std::copy(res.begin(), res.end(), output_matr); + return true; +} + +std::vector gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_vec(int s, int low, int upp) { + std::vector v(s); + for (auto& i : v) i = low + (std::rand() % (upp - low + 1)); + return v; +} + +std::vector> gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(int rows, + int cols) { + std::vector> matr(rows, std::vector(cols)); + + for (int i = 0; i < rows; ++i) { + matr[i] = gen_rand_vec(cols, -500, 500); + } + for (int j = 0; j < cols; ++j) { + int row_rand = std::rand() % rows; + matr[row_rand][j] = 10; + } + return matr; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + } + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + int rows = 0; + int cols = 0; + + int delta = 0; + int delta_1 = 0; + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + + broadcast(world, rows, 0); + broadcast(world, cols, 0); + + delta = rows / world.size(); + delta_1 = rows % world.size(); + + if (world.rank() == 0) { + input_.resize(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + int* input_matr = reinterpret_cast(taskData->inputs[i]); + input_[i].assign(input_matr, input_matr + cols); + } + + for (int proc = 1; proc < world.size(); proc++) { + int row_1 = proc * delta + std::min(proc, delta_1); + int kol_vo = delta + (proc < 
delta_1 ? 1 : 0); + + for (int i = row_1; i < row_1 + kol_vo; i++) world.send(proc, 0, input_[i].data(), cols); + } + } + + int local_input_rows = delta + (world.rank() < delta_1 ? 1 : 0); + local_input_.resize(local_input_rows, std::vector(cols)); + + if (world.rank() == 0) { + std::copy(input_.begin(), input_.begin() + local_input_rows, local_input_.begin()); + } else { + for (int i = 0; i < local_input_rows; i++) world.recv(0, 0, local_input_[i].data(), cols); + } + + res.resize(cols); + + std::vector tmp_max(local_input_[0].size(), INT_MIN); + + for (size_t i = 0; i < local_input_[0].size(); i++) { + for (size_t j = 0; j < local_input_.size(); j++) { + tmp_max[i] = std::max(tmp_max[i], local_input_[j][i]); + } + } + + if (world.rank() == 0) { + std::vector max_s(res.size(), INT_MIN); + std::copy(tmp_max.begin(), tmp_max.end(), max_s.begin()); + + for (int proc = 1; proc < world.size(); proc++) { + std::vector proc_max(res.size()); + world.recv(proc, 0, proc_max.data(), res.size()); + + for (size_t i = 0; i < res.size(); i++) { + max_s[i] = std::max(max_s[i], proc_max[i]); + } + } + std::copy(max_s.begin(), max_s.end(), res.begin()); + } else { + world.send(0, 0, tmp_max.data(), tmp_max.size()); + } + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + std::copy(res.begin(), res.end(), reinterpret_cast(taskData->outputs[0])); + } + return true; +} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..79ab69263fe --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp @@ -0,0 +1,149 @@ +#include + +#include +#include + +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyInput) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyOutput) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(5); + taskDataSeq->inputs_count.push_back(5); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[25])); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_5000_columns_with_random) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + 
+ for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_500_1000_columns_with_random) { + const int rows = 500; + const int cols = 1000; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_1000_3000_columns_with_random) { + const int rows = 1000; + const int cols = 3000; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_size_of_input) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(10); + taskDataSeq->inputs_count.push_back(0); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[10])); + taskDataSeq->outputs_count.push_back(1); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_of_output) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(10); + taskDataSeq->inputs_count.push_back(15); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[150])); + taskDataSeq->outputs_count.push_back(10); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} diff --git 
a/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..d457539d242 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace gordeva_t_max_val_of_column_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::vector gen_rand_vec(int size, int lower_bound = 0, int upper_bound = 30); + static std::vector> gen_rand_matr(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace gordeva_t_max_val_of_column_matrix_seq diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..a2052561a87 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp @@ -0,0 +1,79 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_seq, test_pipeline_run) { + const int cols = 5000; + const int rows = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); + + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector res_vec(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); + taskDataSeq->outputs_count.emplace_back(res_vec.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, test_task_run) { + const int cols = 7000; + const int rows = 7000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matr_rand = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); + for (auto &row : matr_rand) taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res_vec(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); + taskDataSeq->outputs_count.emplace_back(res_vec.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + 
perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); +} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..7e64ae19915 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp @@ -0,0 +1,83 @@ +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +#include <thread> + +using namespace std::chrono_literals; + +namespace gordeva_t_max_val_of_column_matrix_seq { + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + int* input_matr; + input_.resize(rows, std::vector<int>(cols)); + + for (int i = 0; i < rows; i++) { + input_matr = reinterpret_cast<int*>(taskData->inputs[i]); + for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; + } + + res_.resize(cols); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_[0].size(); i++) { + int max_el = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) + if (input_[j][i] > max_el) max_el = input_[j][i]; + + res_[i] = max_el; + } + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + int* output_matr = reinterpret_cast<int*>(taskData->outputs[0]); + + std::copy(res_.begin(), res_.end(), output_matr); + return true; +} + +std::vector<int> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_vec(int size, int lower_bound, + int upper_bound) { + std::vector<int> v(size); + for (auto& number : v) number = lower_bound + (std::rand() % (upper_bound - lower_bound + 1)); + return v; +} + +std::vector<std::vector<int>> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(int rows, + int cols) { + std::vector<std::vector<int>> matr(rows, std::vector<int>(cols)); + + for (int i = 0; i < rows; ++i) { + matr[i] = gen_rand_vec(cols, -500, 500); + } + for (int j = 0; j < cols; ++j) { + int row_rand = std::rand() % rows; + matr[row_rand][j] = 10; + } + return matr; +} + +} // namespace gordeva_t_max_val_of_column_matrix_seq From faf842ed4f8e1821043c3991cd0c11fd820a8ef9 Mon Sep 17 00:00:00 2001 From: ermilovad <112872618+ermilovad@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:51:09 +0300 Subject: [PATCH 072/155] Ermilova Darya. Task 1. Variant 14. Minimum value of matrix elements. (#86) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description of the sequential task: all elements of the matrix are traversed one by one; whenever an element smaller than the current minimum is found, the minimum is updated. Description of the MPI task: the matrix is split into equal parts; each process receives its part of the matrix and finds the minimum within it; all processes then send their results to the root process, which collects them and finds the minimum of the whole matrix.
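The description above maps almost one-to-one onto a handful of boost::mpi calls. The following standalone sketch is illustrative only (the names flat and local and the toy data are not from the patch): rank 0 flattens the matrix, keeps the first delta + extra elements, sends every other rank a block of delta elements, each rank takes a local minimum, and a reduce with boost::mpi::minimum folds the partial results onto the root. It is the same send/recv-plus-reduce pattern that the patch's run() implements below.

// Standalone sketch of the distribution scheme described above; illustrative names, not patch code.
#include <boost/mpi.hpp>
#include <algorithm>
#include <climits>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> flat;  // the flattened rows*cols matrix, filled on the root only
  unsigned int delta = 0;
  unsigned int extra = 0;

  if (world.rank() == 0) {
    flat = {5, -3, 7, 0, 2, -9, 4};  // toy data: rows * cols == 7
    delta = flat.size() / world.size();
    extra = flat.size() % world.size();
  }
  boost::mpi::broadcast(world, delta, 0);

  // The root keeps the first delta + extra elements and sends everyone else
  // a contiguous block of delta elements.
  std::vector<int> local(delta);
  if (world.rank() == 0) {
    local.assign(flat.begin(), flat.begin() + delta + extra);
    for (int proc = 1; proc < world.size() && delta > 0; proc++)
      world.send(proc, 0, flat.data() + delta * proc + extra, delta);
  } else if (delta > 0) {
    world.recv(0, 0, local.data(), delta);
  }

  // Local minimum on each rank, then a min-reduction onto the root.
  int local_min = local.empty() ? INT_MAX : *std::min_element(local.begin(), local.end());
  int global_min = INT_MAX;
  boost::mpi::reduce(world, local_min, global_min, boost::mpi::minimum<int>(), 0);
  // On rank 0, global_min == -9 for the toy data above.
  return 0;
}

Keeping the remainder on the root avoids uneven message sizes: every worker receives exactly delta elements.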
--- .../func_tests/main.cpp | 379 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 +++ .../perf_tests/main.cpp | 140 +++++++ .../src/ops_mpi.cpp | 125 ++++++ .../func_tests/main.cpp | 236 +++++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 127 ++++++ .../src/ops_seq.cpp | 55 +++ 8 files changed, 1135 insertions(+) create mode 100644 tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp create mode 100644 tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp b/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..e848d6ea49f --- /dev/null +++ b/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp @@ -0,0 +1,379 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <random> +#include <vector> + +#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" + +std::vector<int> getRandomVector(int size, int upper_border, int lower_border) { + std::random_device dev; + std::mt19937 gen(dev()); + if (size <= 0) throw "Incorrect size"; + std::vector<int> vec(size); + for (int i = 0; i < size; i++) { + vec[i] = lower_border + gen() % (upper_border - lower_border + 1); + } + return vec; +} + +std::vector<std::vector<int>> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { + if (rows <= 0 || cols <= 0) throw "Incorrect size"; + std::vector<std::vector<int>> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(cols, upper_border, lower_border); + } + return vec; +} + +TEST(ermilova_d_min_element_matrix_mpi, Can_create_vector) { + const int size_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_NO_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_mpi, Cant_create_incorrect_size_vector) { + const int size_test = -10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_ANY_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_mpi, Can_create_matrix) { + const int rows_test = 10; + const int
cols_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_NO_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_mpi, Cant_create_incorrect_size_matrix) { + const int rows_test = -10; + const int cols_test = 0; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_ANY_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_mpi, Matrix_1x1) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + + const int rows_test = 1; + const int cols_test = 1; + const int upper_border_test = 100; + const int lower_border_test = -100; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermilova_d_min_element_matrix_mpi, Matrix_10x10) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + const int rows_test = 10; + const int cols_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermilova_d_min_element_matrix_mpi, Matrix_100x100) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + const int rows_test = 100; + const int cols_test = 100; + const int upper_border_test = 500; + const int lower_border_test = -500; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermilova_d_min_element_matrix_mpi, Matrix_100x50) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + const int rows_test = 100; + const int cols_test = 50; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i 
< global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(ermilova_d_min_element_matrix_mpi, Matrix_50x100) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + const int rows_test = 50; + const int cols_test = 100; + const int upper_border_test = 500; + const int lower_border_test = -500; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + 
+TEST(ermilova_d_min_element_matrix_mpi, Matrix_500x500) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + const int rows_test = 500; + const int cols_test = 500; + const int upper_border_test = 500; + const int lower_border_test = -500; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows_test); + taskDataPar->inputs_count.emplace_back(cols_test); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} diff --git a/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp b/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..3b7c7dfaa6d --- /dev/null +++ b/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace ermilova_d_min_element_matrix_mpi { + +// std::vector getRandomVector(int size, int upper_border, int lower_border); +// std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; + int cols, rows; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + int cols, rows; + boost::mpi::communicator world; +}; + +} // namespace ermilova_d_min_element_matrix_mpi \ No newline at end of file diff --git 
a/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp b/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b24b1dbe26f --- /dev/null +++ b/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp @@ -0,0 +1,140 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" + +std::vector getRandomVector(int size, int upper_border, int lower_border) { + std::random_device dev; + std::mt19937 gen(dev()); + if (size <= 0) throw "Incorrect size"; + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = lower_border + gen() % (upper_border - lower_border + 1); + } + return vec; +} + +std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { + if (rows <= 0 || cols <= 0) throw "Incorrect size"; + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(cols, upper_border, lower_border); + } + return vec; +} + +TEST(ermilova_d_min_element_matrix_mpi, test_pipeline_run) { + std::random_device dev; + std::mt19937 gen(dev()); + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + int reference_min = -5000; + + if (world.rank() == 0) { + const int rows = 1000; + const int cols = 1000; + const int upper_border = 1000; + const int lower_border = -1000; + + global_matrix = getRandomMatrix(rows, cols, upper_border, lower_border); + + int rnd_rows = gen() % rows; + int rnd_cols = gen() % cols; + global_matrix[rnd_rows][rnd_cols] = reference_min; + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(reference_min, global_min[0]); + } +} + +TEST(ermilova_d_min_element_matrix_mpi, test_task_run) { + std::random_device dev; + std::mt19937 gen(dev()); + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + int reference_min = -5000; + + if (world.rank() == 0) { + const int rows = 1000; + const int cols = 1000; + const int upper_border = 1000; + const int lower_border = -1000; + + global_matrix = getRandomMatrix(rows, cols, upper_border, lower_border); + int rnd_rows = gen() % rows; + int rnd_cols = gen() % cols; + global_matrix[rnd_rows][rnd_cols] = 
reference_min; + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(reference_min, global_min[0]); + } +} diff --git a/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp b/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..60c64a89a2e --- /dev/null +++ b/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp @@ -0,0 +1,125 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + auto* tpr_ptr = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = tpr_ptr[j]; + } + } + // Init value for output + res = INT_MAX; + return true; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_.size(); i++) { + for (size_t j = 0; j < input_[i].size(); j++) { + if (res > input_[i][j]) { + res = input_[i][j]; + } + } + } + return true; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + + input_ = std::vector(rows * cols); + + for (int i = 0; i < rows; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i * cols + j] = tmp_ptr[j]; + } + } + } + res = INT_MAX; + return true; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1 && !(taskData->inputs.empty()); + } + return true; +} + +bool 
ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + unsigned int extra = 0; + + if (world.rank() == 0) { + delta = rows * cols / world.size(); + extra = rows * cols % world.size(); + } + + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + delta * proc + extra, delta); + } + } + + local_input_ = std::vector<int>(delta); + + if (world.rank() == 0) { + local_input_ = std::vector<int>(input_.begin(), input_.begin() + delta + extra); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + int local_min = INT_MAX; + if (!local_input_.empty()) { + local_min = *std::min_element(local_input_.begin(), local_input_.end()); + } + reduce(world, local_min, res, boost::mpi::minimum<int>(), 0); + return true; +} + +bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast<int *>(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp b/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..2f289eaab65 --- /dev/null +++ b/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp @@ -0,0 +1,236 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <random> + +#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" + +std::vector<int> getRandomVector(int size, int upper_border, int lower_border) { + std::random_device dev; + std::mt19937 gen(dev()); + if (size <= 0) throw "Incorrect size"; + std::vector<int> vec(size); + for (int i = 0; i < size; i++) { + vec[i] = lower_border + gen() % (upper_border - lower_border + 1); + } + return vec; +} + +std::vector<std::vector<int>> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { + if (rows <= 0 || cols <= 0) throw "Incorrect size"; + std::vector<std::vector<int>> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(cols, upper_border, lower_border); + } + return vec; +} + +TEST(ermilova_d_min_element_matrix_seq, Can_create_vector) { + const int size_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_NO_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_seq, Cant_create_incorrect_vector) { + const int size_test = -10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_ANY_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_seq, Can_create_matrix) { + const int rows_test = 10; + const int cols_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_NO_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_seq, Cant_create_incorrect_matrix) { + const int rows_test = -10; + const int cols_test = 0; + const int upper_border_test = 100; + const int lower_border_test = -100; + EXPECT_ANY_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); +} + +TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_1x1) { + const int rows_test = 1; + const int cols_test = 1; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + + // Create data + std::vector<std::vector<int>> in = getRandomMatrix(rows_test, cols_test, upper_border_test,
lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(reference_min, out[0]); +} + +TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_10x10) { + const int rows_test = 10; + const int cols_test = 10; + const int upper_border_test = 100; + const int lower_border_test = -100; + int reference_min = -500; + + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(reference_min, out[0]); +} + +TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_100x100) { + const int rows_test = 100; + const int cols_test = 100; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(reference_min, out[0]); +} + +TEST(ermilova_d_min_element_matrix_seq, 
Test_min_matrix_50x100) { + const int rows_test = 50; + const int cols_test = 100; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(reference_min, out[0]); +} + +TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_100x50) { + const int rows_test = 100; + const int cols_test = 50; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(reference_min, out[0]); +} diff --git a/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp b/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..ae6a8b3640c --- /dev/null +++ b/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace ermilova_d_min_element_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; + int cols, rows; +}; + +} // namespace ermilova_d_min_element_matrix_seq \ No newline at end of file diff --git a/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp b/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp new file mode 100644 index 
00000000000..a780c7e7497 --- /dev/null +++ b/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp @@ -0,0 +1,127 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" + +std::vector getRandomVector(int size, int upper_border, int lower_border) { + std::random_device dev; + std::mt19937 gen(dev()); + if (size <= 0) throw "Incorrect size"; + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = lower_border + gen() % (upper_border - lower_border + 1); + } + return vec; +} + +std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { + if (rows <= 0 || cols <= 0) throw "Incorrect size"; + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(cols, upper_border, lower_border); + } + return vec; +} + +TEST(ermilova_d_min_element_matrix_seq, test_pipeline_run) { + const int rows_test = 1000; + const int cols_test = 1000; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(reference_min, out[0]); +} + +TEST(ermilova_d_min_element_matrix_seq, test_task_run) { + const int rows_test = 1000; + const int cols_test = 1000; + const int upper_border_test = 1000; + const int lower_border_test = -1000; + int reference_min = -5000; + + // Create data + std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); + std::vector out(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + int rnd_rows = gen() % rows_test; + int rnd_cols = gen() % cols_test; + in[rnd_rows][rnd_cols] = reference_min; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(rows_test); + taskDataSeq->inputs_count.emplace_back(cols_test); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared<ermilova_d_min_element_matrix_seq::TestTaskSequential>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(reference_min, out[0]); +} diff --git a/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp b/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..e9a5fff0a54 --- /dev/null +++ b/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" + +#include <algorithm> +#include <climits> +#include <random> +#include <thread> +#include <vector> + +using namespace std::chrono_literals; + +bool ermilova_d_min_element_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector<int>(cols)); + + for (int i = 0; i < rows; i++) { + auto* tpr_ptr = reinterpret_cast<int *>(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = tpr_ptr[j]; + } + } + + // Init value for output + res = INT_MAX; + return true; +} + +bool ermilova_d_min_element_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; +} + +bool ermilova_d_min_element_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); i++) { + for (size_t j = 0; j < input_[i].size(); j++) { + if (res > input_[i][j]) { + res = input_[i][j]; + } + } + } + return true; +} + +bool ermilova_d_min_element_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int *>(taskData->outputs[0])[0] = res; + return true; +} From 7d4a3ba4f0a96c052f43f3fee3a3c196a78a499a Mon Sep 17 00:00:00 2001 From: Sergey Borisov <66353172+SergeyBoRss@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:52:03 +0300 Subject: [PATCH 073/155] Borisov Sergey. Task 1. Variant 11. Sum of values over matrix rows (#90) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SEQ: In the sequential implementation the program reads the matrix and computes the sum of the elements of each row with a simple pass over all elements. The results are stored in the output array, where each element corresponds to the sum of one matrix row. MPI: In the parallel implementation using MPI the matrix is distributed across processes: the root process splits the matrix into blocks of rows and sends each block to the corresponding process. Each process independently computes the sums of its rows. The local sums are then gathered back on the root process, which combines them into the final array of row sums.
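As a rough illustration of the row-block scheme just described, here is a minimal standalone sketch; it is not the patch's code (the borisov_s_sum_of_rows sources follow below), the names local_sums and my_rows are made up, and the delta > 0 guards are an assumption about how a rank count larger than the row count would be handled. It uses only the point-to-point boost::mpi calls already seen in this series: the root keeps the first delta + extra rows, ships delta rows to every other rank, each rank sums its own rows, and the root collects the partial sums back in row order.

// Illustrative sketch of distributing row sums over boost::mpi; toy names and data, not patch code.
#include <boost/mpi.hpp>
#include <algorithm>
#include <numeric>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int rows = 6;
  const int cols = 4;
  std::vector<int> flat;  // row-major matrix, filled on the root only
  if (world.rank() == 0) flat.assign(rows * cols, 1);  // every row sums to cols

  // The root keeps the first delta + extra rows; every other rank gets delta rows.
  int delta = rows / world.size();
  int extra = rows % world.size();
  int my_rows = (world.rank() == 0) ? delta + extra : delta;

  std::vector<int> local(my_rows * cols);
  if (world.rank() == 0) {
    std::copy(flat.begin(), flat.begin() + my_rows * cols, local.begin());
    for (int proc = 1; proc < world.size() && delta > 0; proc++)
      world.send(proc, 0, flat.data() + (delta * proc + extra) * cols, delta * cols);
  } else if (my_rows > 0) {
    world.recv(0, 0, local.data(), my_rows * cols);
  }

  // Each rank sums its own rows independently.
  std::vector<int> local_sums(my_rows);
  for (int r = 0; r < my_rows; r++)
    local_sums[r] = std::accumulate(local.begin() + r * cols, local.begin() + (r + 1) * cols, 0);

  // The root collects the partial sums back into row order.
  if (world.rank() == 0) {
    std::vector<int> row_sums(rows);
    std::copy(local_sums.begin(), local_sums.end(), row_sums.begin());
    for (int proc = 1; proc < world.size() && delta > 0; proc++)
      world.recv(proc, 0, row_sums.data() + delta * proc + extra, delta);
    // Here row_sums[i] == cols for every row i.
  } else if (my_rows > 0) {
    world.send(0, 0, local_sums.data(), my_rows);
  }
  return 0;
}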
--- .../borisov_s_sum_of_rows/func_tests/main.cpp | 658 ++++++++++++++++++ .../borisov_s_sum_of_rows/include/ops_mpi.hpp | 44 ++ .../borisov_s_sum_of_rows/perf_tests/main.cpp | 110 +++ .../mpi/borisov_s_sum_of_rows/src/ops_mpi.cpp | 187 +++++ .../borisov_s_sum_of_rows/func_tests/main.cpp | 353 ++++++++++ .../borisov_s_sum_of_rows/include/ops_seq.hpp | 23 + .../borisov_s_sum_of_rows/perf_tests/main.cpp | 79 +++ .../seq/borisov_s_sum_of_rows/src/ops_seq.cpp | 79 +++ 8 files changed, 1533 insertions(+) create mode 100644 tasks/mpi/borisov_s_sum_of_rows/func_tests/main.cpp create mode 100644 tasks/mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp create mode 100644 tasks/mpi/borisov_s_sum_of_rows/perf_tests/main.cpp create mode 100644 tasks/mpi/borisov_s_sum_of_rows/src/ops_mpi.cpp create mode 100644 tasks/seq/borisov_s_sum_of_rows/func_tests/main.cpp create mode 100644 tasks/seq/borisov_s_sum_of_rows/include/ops_seq.hpp create mode 100644 tasks/seq/borisov_s_sum_of_rows/perf_tests/main.cpp create mode 100644 tasks/seq/borisov_s_sum_of_rows/src/ops_seq.cpp diff --git a/tasks/mpi/borisov_s_sum_of_rows/func_tests/main.cpp b/tasks/mpi/borisov_s_sum_of_rows/func_tests/main.cpp new file mode 100644 index 00000000000..96fed52458c --- /dev/null +++ b/tasks/mpi/borisov_s_sum_of_rows/func_tests/main.cpp @@ -0,0 +1,658 @@ +#include <gtest/gtest.h> + +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <random> +#include <vector> + +#include "mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp" + +std::vector<int> getRandomMatrix(size_t rows, size_t cols) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector<int> matrix(rows * cols); + for (auto &element : matrix) { + element = static_cast<int>(gen() % 100); + } + return matrix; +} + +TEST(borisov_s_sum_of_rows, Test_Unit_Matrix) { + boost::mpi::communicator world; + std::vector<int> global_matrix; + std::vector<int> global_row_sums; + + std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>(); + + size_t rows = 10; + size_t cols = 10; + + if (world.rank() == 0) { + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_row_sums.size(); i++) { + ASSERT_EQ(global_row_sums[i], 10); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Zero_Matrix) { + boost::mpi::communicator world; + std::vector<int>
global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + if (world.rank() == 0) { + global_matrix.resize(rows * cols, 0); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_row_sums.size(); i++) { + ASSERT_EQ(global_row_sums[i], 0); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Sum_Rows) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 15; + size_t cols = 15; + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, cols); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_row_sums(global_row_sums.size(), 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_row_sums.data())); + taskDataSeq->outputs_count.push_back(reference_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + ASSERT_EQ(sumOfRowsTaskSequential.validation(), true); + sumOfRowsTaskSequential.pre_processing(); + sumOfRowsTaskSequential.run(); + sumOfRowsTaskSequential.post_processing(); + + for (size_t i = 0; i < global_row_sums.size(); i++) { + ASSERT_EQ(reference_row_sums[i], global_row_sums[i]); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Empty_Matrix) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 0; + size_t cols = 0; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + global_row_sums.resize(rows, 0); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + + ASSERT_FALSE(sumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Empty_Matrix1) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 1; + size_t cols = 0; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + global_row_sums.resize(rows, 0); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + + ASSERT_FALSE(sumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Empty_Matrix2) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 0; + size_t cols = 1; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + global_row_sums.resize(rows, 0); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + + ASSERT_FALSE(sumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_NonDivisibleRows) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 17; + size_t cols = 10; + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, cols); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_row_sums(global_row_sums.size(), 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_row_sums.data())); + taskDataSeq->outputs_count.push_back(reference_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + ASSERT_EQ(sumOfRowsTaskSequential.validation(), true); + sumOfRowsTaskSequential.pre_processing(); + sumOfRowsTaskSequential.run(); + sumOfRowsTaskSequential.post_processing(); + + for (size_t i = 0; i < global_row_sums.size(); i++) { + ASSERT_EQ(reference_row_sums[i], global_row_sums[i]); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Large_Matrix) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10000; + size_t cols = 1000; + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, cols); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_row_sums.size(); i++) { + ASSERT_GE(global_row_sums[i], 0); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Max_Min_Int) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 2; + size_t cols = 2; + + if (world.rank() == 0) { + global_matrix = {INT_MAX, INT_MIN, INT_MAX, INT_MIN}; + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_row_sums = {INT_MAX + INT_MIN, INT_MAX + INT_MIN}; + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(reference_row_sums[i], global_row_sums[i]); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Same_Numbers_In_Row) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + std::shared_ptr 
taskDataPar = std::make_shared(); + + size_t rows = 5; + size_t cols = 5; + + if (world.rank() == 0) { + global_matrix.resize(rows * cols); + for (size_t i = 0; i < rows; ++i) { + int value = static_cast(i + 1); + for (size_t j = 0; j < cols; ++j) { + global_matrix[(i * cols) + j] = value; + } + } + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_EQ(sumOfRowsTaskParallel.validation(), true); + + sumOfRowsTaskParallel.pre_processing(); + sumOfRowsTaskParallel.run(); + sumOfRowsTaskParallel.post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < rows; i++) { + int expected_sum = static_cast((i + 1) * cols); + ASSERT_EQ(global_row_sums[i], expected_sum); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Null_Pointers) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Invalid_Output_Count_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 10; + size_t cols = 10; + + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows - 1, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataSeq->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Invalid_Output_Counts_Parallel) { + std::shared_ptr taskDataPar = std::make_shared(); + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 10; + size_t cols = 10; + + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(7); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel sumOfRowsTaskParallel(taskDataPar); + + ASSERT_FALSE(sumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Invalid_Input_Output_Size_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + + size_t rows = 10; + + 
taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->outputs_count.push_back(0); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Validation_Invalid_Output_Count_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 10; + size_t cols = 10; + + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows - 1, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataSeq->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Validation_Cols_Less_Than_Or_Equal_To_Zero) { + std::shared_ptr taskDataSeq = std::make_shared(); + + size_t rows = 10; + size_t cols = 0; + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Validation_Null_Pointers) { + std::shared_ptr taskDataSeq = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + taskDataSeq->inputs.emplace_back(nullptr); + taskDataSeq->outputs.emplace_back(nullptr); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Run_NonEmpty_Matrix_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 3; + size_t cols = 3; + + global_matrix = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + global_row_sums.resize(rows, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataSeq->outputs_count.push_back(global_row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataSeq); + + ASSERT_TRUE(sumOfRowsTaskSequential.validation()); + ASSERT_TRUE(sumOfRowsTaskSequential.pre_processing()); + ASSERT_TRUE(sumOfRowsTaskSequential.run()); + + ASSERT_TRUE(sumOfRowsTaskSequential.post_processing()); + + std::vector expected_sums = {6, 15, 24}; + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(global_row_sums[i], expected_sums[i]); + } +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers1) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector matrix(rows * cols, 1); + + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + 
borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers2) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector row_sums(rows, 0); + + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers1_Parallel) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector matrix(rows * cols, 1); + + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel SumOfRowsTaskParallel(taskDataPar); + ASSERT_FALSE(SumOfRowsTaskParallel.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers2_Parallel) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector row_sums(rows, 0); + + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskParallel SumOfRowsTaskParallel(taskDataPar); + ASSERT_FALSE(SumOfRowsTaskParallel.validation()); +} \ No newline at end of file diff --git a/tasks/mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp b/tasks/mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp new file mode 100644 index 00000000000..54266dd8fd6 --- /dev/null +++ b/tasks/mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace borisov_s_sum_of_rows { + +class SumOfRowsTaskSequential : public ppc::core::Task { + public: + explicit SumOfRowsTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> matrix_; + std::vector row_sums_; +}; + +class SumOfRowsTaskParallel : public ppc::core::Task { + public: + explicit SumOfRowsTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix_, loc_matrix_; + std::vector row_sums_, loc_row_sums_; + boost::mpi::communicator world; +}; + +} // namespace borisov_s_sum_of_rows \ No newline at end of file diff --git a/tasks/mpi/borisov_s_sum_of_rows/perf_tests/main.cpp b/tasks/mpi/borisov_s_sum_of_rows/perf_tests/main.cpp new file mode 100644 index 00000000000..8fb921a22ab --- /dev/null +++ b/tasks/mpi/borisov_s_sum_of_rows/perf_tests/main.cpp @@ -0,0 +1,110 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include 
"core/perf/include/perf.hpp" +#include "mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp" + +TEST(borisov_s_sum_of_rows, Test_Pipeline_Run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 5000; + size_t cols = 5000; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel->validation()); + + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (size_t i = 0; i < rows; ++i) { + ASSERT_EQ(global_row_sums[i], 5000); + } + } +} + +TEST(borisov_s_sum_of_rows, Test_Task_Run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_row_sums; + + size_t rows = 5000; + size_t cols = 5000; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix.resize(rows * cols, 1); + global_row_sums.resize(rows, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_row_sums.data())); + taskDataPar->outputs_count.push_back(global_row_sums.size()); + } else { + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(0); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel->validation()); + + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (size_t i = 0; i < rows; ++i) { + ASSERT_EQ(global_row_sums[i], 5000); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/borisov_s_sum_of_rows/src/ops_mpi.cpp b/tasks/mpi/borisov_s_sum_of_rows/src/ops_mpi.cpp new file mode 100644 index 00000000000..d31cef2aad0 --- /dev/null 
+++ b/tasks/mpi/borisov_s_sum_of_rows/src/ops_mpi.cpp @@ -0,0 +1,187 @@ +#include "mpi/borisov_s_sum_of_rows/include/ops_mpi.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential ::pre_processing() { + internal_order_test(); + + size_t rows = taskData->inputs_count[0]; + size_t cols = taskData->inputs_count[1]; + + if (rows > 0 && cols > 0) { + matrix_.resize(rows, std::vector(cols)); + int* data = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < rows; i++) { + for (size_t j = 0; j < cols; j++) { + matrix_[i][j] = data[(i * cols) + j]; + } + } + } + + row_sums_.resize(rows, 0); + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential::validation() { + internal_order_test(); + + if (taskData->outputs_count[0] != taskData->inputs_count[0]) { + return false; + } + + size_t cols = taskData->inputs_count.size() > 1 ? taskData->inputs_count[1] : 0; + if (cols <= 0) { + return false; + } + + if (taskData->inputs[0] == nullptr || taskData->outputs[0] == nullptr) { + return false; + } + + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential ::run() { + internal_order_test(); + + if (!matrix_.empty() && !matrix_[0].empty()) { + for (size_t i = 0; i < matrix_.size(); i++) { + int row_sum = 0; + for (size_t j = 0; j < matrix_[i].size(); j++) { + row_sum += matrix_[i][j]; + } + row_sums_[i] = row_sum; + } + } + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential ::post_processing() { + internal_order_test(); + + if (!row_sums_.empty()) { + int* out = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < row_sums_.size(); i++) { + out[i] = row_sums_[i]; + } + } + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskParallel::pre_processing() { + internal_order_test(); + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskParallel::validation() { + internal_order_test(); + + bool is_valid = true; + + if (world.rank() == 0) { + if (taskData->outputs_count[0] != taskData->inputs_count[0]) { + is_valid = false; + } + + size_t cols = taskData->inputs_count[1]; + if (cols == 0) { + is_valid = false; + } + + if (taskData->inputs[0] == nullptr || taskData->outputs[0] == nullptr) { + is_valid = false; + } + } + + boost::mpi::broadcast(world, is_valid, 0); + + return is_valid; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskParallel::run() { + internal_order_test(); + + size_t rows = 0; + size_t cols = 0; + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + + boost::mpi::broadcast(world, rows, 0); + boost::mpi::broadcast(world, cols, 0); + + size_t base_rows_per_proc = rows / world.size(); + int remainder_rows = static_cast(rows % world.size()); + + size_t local_rows = base_rows_per_proc + (world.rank() < remainder_rows ? 1 : 0); + + std::vector sendcounts(world.size()); + std::vector displs(world.size()); + + if (world.rank() == 0) { + size_t offset = 0; + for (int i = 0; i < world.size(); i++) { + size_t rows_for_proc = base_rows_per_proc + (i < remainder_rows ? 
1 : 0); + sendcounts[i] = static_cast(rows_for_proc * cols); + displs[i] = static_cast(offset * cols); + offset += rows_for_proc; + } + } + + loc_matrix_.resize(local_rows * cols); + + int* sendbuf = nullptr; + if (world.rank() == 0) { + sendbuf = reinterpret_cast(taskData->inputs[0]); + } + + MPI_Scatterv(sendbuf, sendcounts.data(), displs.data(), MPI_INT, loc_matrix_.data(), + static_cast(loc_matrix_.size()), MPI_INT, 0, MPI_COMM_WORLD); + + loc_row_sums_.resize(local_rows, 0); + + for (size_t i = 0; i < loc_row_sums_.size(); i++) { + loc_row_sums_[i] = 0; + for (size_t j = 0; j < cols; j++) { + loc_row_sums_[i] += loc_matrix_[(i * cols) + j]; + } + } + + if (world.rank() == 0) { + row_sums_.resize(taskData->inputs_count[0], 0); + } + + std::vector recvcounts(world.size()); + std::vector displs2(world.size()); + + size_t offset = 0; + for (int i = 0; i < world.size(); ++i) { + size_t rows_for_proc = base_rows_per_proc + (i < remainder_rows ? 1 : 0); + recvcounts[i] = static_cast(rows_for_proc); + displs2[i] = static_cast(offset); + offset += rows_for_proc; + } + + MPI_Gatherv(loc_row_sums_.data(), static_cast(loc_row_sums_.size()), MPI_INT, row_sums_.data(), + recvcounts.data(), displs2.data(), MPI_INT, 0, MPI_COMM_WORLD); + + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + if (!row_sums_.empty()) { + int* out = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < row_sums_.size(); i++) { + out[i] = row_sums_[i]; + } + } + } + return true; +} diff --git a/tasks/seq/borisov_s_sum_of_rows/func_tests/main.cpp b/tasks/seq/borisov_s_sum_of_rows/func_tests/main.cpp new file mode 100644 index 00000000000..9a23b68ca60 --- /dev/null +++ b/tasks/seq/borisov_s_sum_of_rows/func_tests/main.cpp @@ -0,0 +1,353 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/borisov_s_sum_of_rows/include/ops_seq.hpp" + +TEST(borisov_s_sum_of_rows, Test_Sum_Matrix_10) { + size_t rows = 10; + size_t cols = 10; + + // Create data + std::vector matrix(rows * cols, 1); + std::vector row_sums(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + // Create Task + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 10); + } +} + +TEST(borisov_s_sum_of_rows, Test_Sum_Matrix_30) { + size_t rows = 30; + size_t cols = 30; + + // Create data + std::vector matrix(rows * cols, 1); + std::vector row_sums(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + // Create Task + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + 
sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 30); + } +} + +TEST(borisov_s_sum_of_rows, Test_Sum_Matrix_100) { + size_t rows = 100; + size_t cols = 100; + + // Create data + std::vector matrix(rows * cols, 1); + std::vector row_sums(rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + // Create Task + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 100); + } +} + +TEST(borisov_s_sum_of_rows, EmptyMatrix) { + size_t rows = 0; + size_t cols = 0; + + std::vector matrix; + std::vector row_sums; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.push_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_EQ(sumOfRowsTask.validation(), false); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + ASSERT_TRUE(row_sums.empty()); +} + +TEST(borisov_s_sum_of_rows, Test_Negative_Numbers) { + size_t rows = 5; + size_t cols = 5; + + std::vector matrix(rows * cols); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution dist(-100, 100); + for (auto &elem : matrix) { + elem = dist(gen); + } + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + int expected_sum = 0; + for (size_t j = 0; j < cols; j++) { + expected_sum += matrix[(i * cols) + j]; + } + ASSERT_EQ(row_sums[i], expected_sum); + } +} + +TEST(borisov_s_sum_of_rows, Test_NonDivisibleDimensions) { + size_t rows = 7; + size_t cols = 3; + + std::vector matrix = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}; + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + 
ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + int expected_sum = 0; + for (size_t j = 0; j < cols; j++) { + expected_sum += matrix[(i * cols) + j]; + } + ASSERT_EQ(row_sums[i], expected_sum); + } +} + +TEST(borisov_s_sum_of_rows, Test_Max_Min_Int) { + size_t rows = 2; + size_t cols = 2; + + std::vector matrix = {INT_MAX, INT_MIN, INT_MAX, INT_MIN}; + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + int expected_sum = INT_MAX + INT_MIN; + ASSERT_EQ(row_sums[i], expected_sum); + } +} + +TEST(borisov_s_sum_of_rows, Test_Single_Row_Matrix) { + size_t rows = 1; + size_t cols = 10; + + std::vector matrix(cols, 1); + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + ASSERT_EQ(row_sums[0], 10); +} + +TEST(borisov_s_sum_of_rows, Test_Single_Column_Matrix) { + size_t rows = 10; + size_t cols = 1; + + std::vector matrix(rows, 1); + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.emplace_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + ASSERT_TRUE(sumOfRowsTask.validation()); + + sumOfRowsTask.pre_processing(); + sumOfRowsTask.run(); + sumOfRowsTask.post_processing(); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 1); + } +} + +TEST(borisov_s_sum_of_rows, Test_Null_Pointers) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers1) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector matrix(rows * cols, 1); + + 
taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->outputs.emplace_back(nullptr); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Null_One_Pointers2) { + std::shared_ptr taskDataPar = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector row_sums(rows, 0); + + taskDataPar->inputs.emplace_back(nullptr); + taskDataPar->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTaskSequential(taskDataPar); + ASSERT_FALSE(sumOfRowsTaskSequential.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Validation_Invalid_Output_Count_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + std::vector matrix(rows * cols, 1); + std::vector row_sums(rows - 1, 0); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.push_back(row_sums.size()); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTask.validation()); +} + +TEST(borisov_s_sum_of_rows, Test_Validation_Null_Inputs_Outputs_Sequential) { + std::shared_ptr taskDataSeq = std::make_shared(); + + size_t rows = 10; + size_t cols = 10; + + taskDataSeq->inputs.emplace_back(nullptr); + taskDataSeq->outputs.emplace_back(nullptr); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs_count.push_back(rows); + + borisov_s_sum_of_rows::SumOfRowsTaskSequential sumOfRowsTask(taskDataSeq); + + ASSERT_FALSE(sumOfRowsTask.validation()); +} \ No newline at end of file diff --git a/tasks/seq/borisov_s_sum_of_rows/include/ops_seq.hpp b/tasks/seq/borisov_s_sum_of_rows/include/ops_seq.hpp new file mode 100644 index 00000000000..f89f900eda7 --- /dev/null +++ b/tasks/seq/borisov_s_sum_of_rows/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace borisov_s_sum_of_rows { + +class SumOfRowsTaskSequential : public ppc::core::Task { + public: + explicit SumOfRowsTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix_; + std::vector row_sums_; +}; + +} // namespace borisov_s_sum_of_rows \ No newline at end of file diff --git a/tasks/seq/borisov_s_sum_of_rows/perf_tests/main.cpp b/tasks/seq/borisov_s_sum_of_rows/perf_tests/main.cpp new file mode 100644 index 00000000000..bd7962a0d96 --- /dev/null +++ b/tasks/seq/borisov_s_sum_of_rows/perf_tests/main.cpp @@ -0,0 +1,79 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/borisov_s_sum_of_rows/include/ops_seq.hpp" + +TEST(borisov_s_sum_of_rows, test_pipeline_run) { + size_t rows = 5000; + size_t cols = 5000; + + 
std::vector matrix(rows * cols, 1); + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.push_back(row_sums.size()); + + auto sumOfRowsTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sumOfRowsTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 5000); + } +} + +TEST(borisov_s_sum_of_rows, test_task_run) { + size_t rows = 5000; + size_t cols = 5000; + + std::vector matrix(rows * cols, 1); + std::vector row_sums(rows, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(row_sums.data())); + taskDataSeq->outputs_count.push_back(row_sums.size()); + + auto sumOfRowsTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sumOfRowsTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(row_sums[i], 5000); + } +} diff --git a/tasks/seq/borisov_s_sum_of_rows/src/ops_seq.cpp b/tasks/seq/borisov_s_sum_of_rows/src/ops_seq.cpp new file mode 100644 index 00000000000..5b7bdcc8d10 --- /dev/null +++ b/tasks/seq/borisov_s_sum_of_rows/src/ops_seq.cpp @@ -0,0 +1,79 @@ +#include "seq/borisov_s_sum_of_rows/include/ops_seq.hpp" + +using namespace std::chrono_literals; + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential::pre_processing() { + internal_order_test(); + + size_t rows = taskData->inputs_count[0]; + size_t cols = taskData->inputs_count[1]; + + if (rows <= 0 || cols <= 0) { + return false; + } + + int* data = reinterpret_cast(taskData->inputs[0]); + if (data == nullptr) { + return false; + } + + matrix_.resize(rows * cols); + + for (size_t i = 0; i < rows; i++) { + for (size_t j = 0; j < cols; j++) { + matrix_[(i * cols) + j] = data[(i * cols) + j]; + } + } + + row_sums_.resize(rows, 0); + return true; +} + +bool borisov_s_sum_of_rows::SumOfRowsTaskSequential::validation() { + internal_order_test(); + + if (taskData->outputs_count[0] != taskData->inputs_count[0]) { + return false; + } + + size_t cols = taskData->inputs_count.size() > 1 ? 
taskData->inputs_count[1] : 0;
+  if (cols <= 0) {
+    return false;
+  }
+
+  if (taskData->inputs[0] == nullptr || taskData->outputs[0] == nullptr) {
+    return false;
+  }
+
+  return true;
+}
+
+bool borisov_s_sum_of_rows::SumOfRowsTaskSequential::run() {
+  internal_order_test();
+
+  size_t rows = taskData->inputs_count[0];
+  size_t cols = taskData->inputs_count[1];
+
+  if (!matrix_.empty() && row_sums_.size() == rows) {
+    for (size_t i = 0; i < rows; i++) {
+      int row_sum = 0;
+      for (size_t j = 0; j < cols; j++) {
+        row_sum += matrix_[(i * cols) + j];
+      }
+      row_sums_[i] = row_sum;
+    }
+  }
+  return true;
+}
+
+bool borisov_s_sum_of_rows::SumOfRowsTaskSequential::post_processing() {
+  internal_order_test();
+
+  if (!row_sums_.empty()) {
+    int* out = reinterpret_cast(taskData->outputs[0]);
+    for (size_t i = 0; i < row_sums_.size(); i++) {
+      out[i] = row_sums_[i];
+    }
+  }
+  return true;
+}

From 246db33711440221573d4ea9e4561e37b9b9180f Mon Sep 17 00:00:00 2001
From: Seraphim Volochaev <116020688+Svoloch2940194@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:53:06 +0300
Subject: [PATCH 074/155] Volochaev Seraphim. Task 1. Variant 27: Counting the number of mismatched characters of two strings (#92)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential version:
Input: two strings (they may have different lengths). If the lengths differ, the answer starts out as the difference of the two lengths, otherwise as 0. We walk the strings linearly and compare characters by index; whenever the characters at the same position differ, we add 2 to the answer, continuing up to the length of the shorter of the two strings.

pre_processing: data acquisition. We take the two strings and look at their lengths. If they differ, the longer string is truncated to the length of the shorter one, and the length difference is placed into the answer.
validation: check the input and output data by count (there must be 2 inputs and 1 output).
run: walk the two strings and compare characters by index. If they differ, add 2; otherwise move on.
post_processing: write out the result.

Parallel version: the same idea, but the strings are split across the processes. If the shorter string has length size, we divide size by world.size() (rounding up) and obtain the number of characters x each process must handle. The characters are then partitioned like this:
process 1: [0, x-1]
process 2: [x, 2x-1]
process 3: [2x, 3x-1]
...
process n: [(n-1)x, nx-1]
A minimal sketch of the counting rule itself is given below.
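[Editorial aside, not code from this patch: a self-contained sequential sketch of the counting rule described above. The helper name count_mismatches is hypothetical; the patch implements this logic inside Lab1_27_seq::pre_processing() and run().]

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <string>

// Answer starts as |len(a) - len(b)|; every position within the common
// prefix length where the characters differ adds 2, per the rule above.
int count_mismatches(const std::string& a, const std::string& b) {
  int res = std::abs(static_cast<int>(a.size()) - static_cast<int>(b.size()));
  const size_t n = std::min(a.size(), b.size());
  for (size_t i = 0; i < n; ++i) {
    if (a[i] != b[i]) {
      res += 2;
    }
  }
  return res;
}

int main() {
  std::printf("%d\n", count_mismatches("abcd", "abXd"));    // 2: one mismatching position
  std::printf("%d\n", count_mismatches("abcdef", "abcd"));  // 2: length difference only
  return 0;
}

In the parallel variant each process would run the same loop over its slice [(k-1)x, kx) of the common prefix, and the per-process counts would then be combined with a plus-reduction on the root, which is what ops_mpi.cpp below does.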
--------- Co-authored-by: Svolota --- .../func_tests/main.cpp | 229 ++++++++++++++++++ .../include/ops_mpi.hpp | 45 ++++ .../perf_tests/main.cpp | 91 +++++++ .../src/ops_mpi.cpp | 126 ++++++++++ .../func_tests/main.cpp | 184 ++++++++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 81 +++++++ .../src/ops_seq.cpp | 52 ++++ 8 files changed, 832 insertions(+) create mode 100644 tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp diff --git a/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp new file mode 100644 index 00000000000..c9d9adcd7e9 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp @@ -0,0 +1,229 @@ +#include + +#include +#include +#include +#include + +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +namespace volochaev_s_count_characters_27_mpi { + +std::string get_random_string(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::string vec(sz, ' '); + for (int i = 0; i < sz; i++) { + vec[i] += gen() % 256; + } + return vec; +} + +} // namespace volochaev_s_count_characters_27_mpi + +TEST(volochaev_s_count_characters_27_MPI, Test_0) { + boost::mpi::communicator world; + std::vector global_vec(1, volochaev_s_count_characters_27_mpi::get_random_string(20)); + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str1 = 240; + const int size_str2 = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str1), + volochaev_s_count_characters_27_mpi::get_random_string(size_str2)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + 
testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int string_sz = 120; + std::string s = volochaev_s_count_characters_27_mpi::get_random_string(string_sz); + global_vec = {s, s}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_3) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str1 = 240; + const int size_str2 = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str2), + volochaev_s_count_characters_27_mpi::get_random_string(size_str1)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + 
std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_4) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str), + volochaev_s_count_characters_27_mpi::get_random_string(size_str)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp b/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp new file mode 100644 index 00000000000..582a1e3724e --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace volochaev_s_count_characters_27_mpi { + +class Lab1_27_seq : public ppc::core::Task { + public: + explicit Lab1_27_seq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class Lab1_27_mpi : public ppc::core::Task { + public: + explicit Lab1_27_mpi(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_, 
local_input_; + int res{}; + int del{}; + boost::mpi::communicator world; +}; + +} // namespace volochaev_s_count_characters_27_mpi \ No newline at end of file diff --git a/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp new file mode 100644 index 00000000000..bfe2e510e3f --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +TEST(volochaev_s_count_characters_27_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_string; + if (world.rank() == 0) { + count_size_string = 200000000; + std::string s(count_size_string, ' '); + global_vec = std::vector(2, s); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(2, global_sum[0]); + } +} + +TEST(volochaev_s_count_characters_27_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int string_size; + if (world.rank() == 0) { + string_size = 200000000; + std::string s(string_size, ' '); + global_vec = std::vector(2, s); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} diff --git a/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp 
b/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp new file mode 100644 index 00000000000..90d1a84e6a7 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp @@ -0,0 +1,126 @@ +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::pre_processing() { + internal_order_test(); + // Init vectors + auto tmp1 = reinterpret_cast<std::string *>(taskData->inputs[0])[0]; + auto tmp2 = reinterpret_cast<std::string *>(taskData->inputs[0])[1]; + + input_ = std::vector<std::pair<char, char>>(std::min(tmp1.size(), tmp2.size())); + + for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); i++) { + input_[i].first = tmp1[i]; + input_[i].second = tmp2[i]; + } + + // Init value for output + res = abs(static_cast<int>(tmp1.size()) - static_cast<int>(tmp2.size())); + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::run() { + internal_order_test(); + for (auto [x, y] : input_) { + if (x != y) { + res += 2; + } + } + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::post_processing() { + internal_order_test(); + *reinterpret_cast<int *>(taskData->outputs[0]) = res; + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::pre_processing() { + internal_order_test(); + + // Init value for output + res = 0; + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::validation() { + internal_order_test(); + + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::run() { + internal_order_test(); + + std::string tmp1; + std::string tmp2; + int delta = 0; + if (world.rank() == 0) { + tmp1 = reinterpret_cast<std::string *>(taskData->inputs[0])[0]; + tmp2 = reinterpret_cast<std::string *>(taskData->inputs[0])[1]; + + del = abs(static_cast<int>(tmp1.size()) - static_cast<int>(tmp2.size())); + + delta = static_cast<int>(std::min(tmp1.size(), tmp2.size())) / world.size(); + // Round up on the common string length, not on inputs_count[0] (which is always 2), + // so the tail characters are not silently dropped; the padding pairs are + // value-initialized to equal characters and add nothing to the count. + if (std::min(tmp1.size(), tmp2.size()) % world.size() > 0u) ++delta; + } + + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Init vectors + input_ = std::vector<std::pair<char, char>>(world.size() * delta); + + for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); ++i) { + input_[i].first = tmp1[i]; + input_[i].second = tmp2[i]; + } + + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + } + + local_input_ = std::vector<std::pair<char, char>>(delta); + if (world.rank() == 0) { + local_input_ = std::vector<std::pair<char, char>>(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + int res1 = 0; + for (auto [x, y] : local_input_) { + if (x != y) { + res1 += 2; + } + } + reduce(world, res1, res, std::plus(), 0); + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast<int *>(taskData->outputs[0]) = res + del; + } + return true; +} diff --git a/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp new file mode 100644 index 00000000000..7c1154192eb --- /dev/null +++ 
b/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp @@ -0,0 +1,184 @@ +#include + +#include +#include + +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +namespace volochaev_s_count_characters_27_seq { + +std::string get_random_string(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::string vec(sz, ' '); + for (int i = 0; i < sz; i++) { + vec[i] += gen() % 256; + } + return vec; +} +} // namespace volochaev_s_count_characters_27_seq + +TEST(volochaev_s_count_characters_27_seq, Test_0) { + // Create data + std::vector in = {volochaev_s_count_characters_27_seq::get_random_string(20)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(volochaev_s_count_characters_27_seq, Test_1) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); + std::vector in(2, s); + std::vector out(1, 0); + + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_2) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); + std::string s1 = s; + + s1.back() = static_cast((static_cast(s1.back()) + 1) % 256); + + std::vector in = {s, s1}; + std::vector out(1, 0); + int ans = 2; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_3) { + // Create data + + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::string s1 = s.substr(0, 2); + + std::vector in = {s, s1}; + std::vector out(1, 0); + int ans = 4; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + 
ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_4) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::string s1 = s.substr(0, 2); + + std::vector in = {s1, s}; + std::vector out(1, 0); + int ans = 4; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_5) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_6) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(7); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp b/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp new file mode 100644 index 00000000000..7279e6fa3a8 --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace volochaev_s_count_characters_27_seq { + +class Lab1_27 : public ppc::core::Task { + public: + explicit Lab1_27(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int sz1, sz2; + int res{}; +}; + +} // namespace volochaev_s_count_characters_27_seq \ No newline at end of file diff --git 
a/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp new file mode 100644 index 00000000000..25c7995c367 --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +TEST(volochaev_s_count_characters_27_seq, test_pipeline_run) { + // Create data + std::string s(20000000, ' '); + std::vector in(2, s); + std::vector out(1, 0); + + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, test_task_run) { + // Create data + std::string s(20000000, ' '); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp b/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp new file mode 100644 index 00000000000..498068ea5f9 --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp @@ -0,0 +1,52 @@ +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool volochaev_s_count_characters_27_seq::Lab1_27::pre_processing() { + internal_order_test(); + // Init value for input and output + std::string input1_ = 
reinterpret_cast<std::string *>(taskData->inputs[0])[0]; + std::string input2_ = reinterpret_cast<std::string *>(taskData->inputs[0])[1]; + + input_ = std::vector<std::pair<char, char>>(std::min(input1_.size(), input2_.size())); + + for (size_t i = 0; i < std::min(input1_.size(), input2_.size()); ++i) { + input_[i].first = input1_[i]; + input_[i].second = input2_[i]; + } + + sz1 = input1_.size(); + sz2 = input2_.size(); + res = 0; + return true; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::run() { + internal_order_test(); + + res = abs(sz1 - sz2); + + for (auto [x, y] : input_) { + if (x != y) { + res += 2; + } + } + + return true; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::post_processing() { + internal_order_test(); + *reinterpret_cast<int *>(taskData->outputs[0]) = res; + return true; +} From 3a91e8247f31266221488be3ea52624954fe5b41 Mon Sep 17 00:00:00 2001 From: Sozonov_Ilushka <113029719+sozozzya@users.noreply.github.com> Date: Mon, 4 Nov 2024 03:53:58 +0300 Subject: [PATCH 075/155] Sozonov Ilya. Task 1. Variant 7. Finding the closest neighboring elements of a vector. (#93) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Sequential task description:** 1. Create a variable min and initialize it with the largest possible value, INT_MAX. 2. Walk the vector from the first element to the next-to-last one, computing the absolute difference between the current element and the element that follows it. 3. If that absolute difference is smaller than the current value of min, update min and remember the index of the current vector element. **MPI task description:** 1. Build a new vector diff consisting of pairs: the absolute difference between adjacent elements of the source vector and the corresponding index. 2. The resulting vector of pairs is split evenly into segments, one per process. 3. Process zero sends each data segment to the remaining processes. 4. Each process finds the smallest element and its index within the segment it received. 5. Once the local computations are finished, the processes return their results to the root process, where the collective reduce operation selects the smallest absolute difference between adjacent elements of the vector, together with its index. A minimal sketch of this scheme is shown below. 
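To make steps 1-5 concrete, here is a minimal, self-contained sketch of the same scheme written directly against Boost.MPI, outside the ppc::core task framework. It is an illustration under stated assumptions, not the patch's code: the identifiers (diff, local, local_min, global_min) are invented for the sketch, and the root keeps the leftover tail of the even split so that no adjacent pair goes unexamined.

// Sketch (assumptions noted above): smallest |difference| of adjacent elements via MPI.
#include <boost/mpi.hpp>
#include <boost/serialization/utility.hpp>  // lets Boost.MPI ship std::pair

#include <algorithm>
#include <climits>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<std::pair<int, int>> diff;  // {|v[i+1] - v[i]|, i} -- step 1
  int delta = 0;
  if (world.rank() == 0) {
    std::vector<int> v(1000);
    std::iota(v.begin(), v.end(), 0);
    v[0] = 1;  // makes {0, 0} the unique answer: v[0] == v[1] == 1
    diff.resize(v.size() - 1);
    for (size_t i = 0; i + 1 < v.size(); ++i)
      diff[i] = {std::abs(v[i + 1] - v[i]), static_cast<int>(i)};
    delta = static_cast<int>(diff.size()) / world.size();  // step 2: even split
  }
  boost::mpi::broadcast(world, delta, 0);

  std::vector<std::pair<int, int>> local(delta);
  if (world.rank() == 0) {
    for (int proc = 1; proc < world.size(); ++proc)  // step 3
      world.send(proc, 0, diff.data() + proc * delta, delta);
    local.assign(diff.begin(), diff.begin() + delta);
    // The root also keeps the remainder of the division, so no pair is skipped.
    local.insert(local.end(), diff.begin() + world.size() * delta, diff.end());
  } else {
    world.recv(0, 0, local.data(), delta);
  }

  std::pair<int, int> local_min{INT_MAX, -1};  // step 4: local minimum
  if (!local.empty()) local_min = *std::min_element(local.begin(), local.end());

  std::pair<int, int> global_min;  // step 5: collective reduce on the root
  boost::mpi::reduce(world, local_min, global_min,
                     boost::mpi::minimum<std::pair<int, int>>(), 0);
  if (world.rank() == 0)
    std::cout << "min diff " << global_min.first << " at index " << global_min.second << '\n';
}

Run under mpirun, every process ranks its own segment and the reduce on rank 0 picks the globally smallest pair; since std::pair compares lexicographically, ties on the difference resolve to the smaller index.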
--- .../func_tests/main.cpp | 323 ++++++++++++++++++ .../include/ops_mpi.hpp | 45 +++ .../perf_tests/main.cpp | 95 ++++++ .../src/ops_mpi.cpp | 108 ++++++ .../func_tests/main.cpp | 153 +++++++++ .../include/ops_seq.hpp | 23 ++ .../perf_tests/main.cpp | 87 +++++ .../src/ops_seq.cpp | 43 +++ 8 files changed, 877 insertions(+) create mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp create mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..6d96709c987 --- /dev/null +++ b/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,323 @@ +#include + +#include +#include +#include +#include + +#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" + +namespace sozonov_i_nearest_neighbor_elements_mpi { + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +} // namespace sozonov_i_nearest_neighbor_elements_mpi + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_for_empty_vector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_10_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + ans = {1, 1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create TaskData + std::shared_ptr 
taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, ans); + ASSERT_EQ(global_ans, ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_50_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 50; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + ans = {1, 1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, ans); + ASSERT_EQ(global_ans, ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_500_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 500; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + ans = {1, 1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create 
TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, ans); + ASSERT_EQ(global_ans, ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_1000_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + ans = {1, 1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, ans); + ASSERT_EQ(global_ans, ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_random_on_500_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 500; + global_vec = sozonov_i_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create TaskData + std::shared_ptr 
taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, global_ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_random_on_1000_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = sozonov_i_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum, global_ans); + } +} \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..91fa439a73b --- /dev/null +++ b/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sozonov_i_nearest_neighbor_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector> diff, 
local_input_; + std::pair res; + boost::mpi::communicator world; +}; + +} // namespace sozonov_i_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..5042f0371a8 --- /dev/null +++ b/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,95 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans(2, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 1000000; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, global_ans); + } +} + +TEST(sozonov_i_nearest_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(2, 0); + std::vector ans(2, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + global_vec = std::vector(count_size_vector); + std::iota(global_vec.begin(), global_vec.end(), 0); + global_vec[0] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + 
ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, global_ans); + } +} \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..7ed936e151b --- /dev/null +++ b/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,108 @@ +#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector<int>(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; ++i) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of input and output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + int min = INT_MAX; + for (size_t i = 0; i < input_.size() - 1; i++) { + if (abs(input_[i + 1] - input_[i]) < min) { + min = abs(input_[i + 1] - input_[i]); + res = i; + } + } + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int *>(taskData->outputs[0])[0] = input_[res]; + reinterpret_cast<int *>(taskData->outputs[0])[1] = input_[res + 1]; + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + // Init vectors + input_ = std::vector<int>(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; ++i) { + input_[i] = tmp_ptr[i]; + } + } + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of input and output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; + } + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = (taskData->inputs_count[0] - 1) / world.size(); + } + broadcast(world, delta, 0); + if (world.rank() == 0) { + diff = std::vector<std::pair<int, int>>(taskData->inputs_count[0] - 1); + for (size_t i = 0; i < input_.size() - 1; ++i) { + diff[i] = {abs(input_[i + 1] - input_[i]), static_cast<int>(i)}; + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, diff.data() + proc * delta, delta); + } + } + local_input_ = std::vector<std::pair<int, int>>(delta); + if (world.rank() == 0) { + local_input_ = std::vector<std::pair<int, int>>(diff.begin(), diff.begin() + delta); + // Keep the tail left over by the integer division on the root, so the last + // (inputs_count[0] - 1) % world.size() pairs are not silently skipped. + local_input_.insert(local_input_.end(), diff.begin() + world.size() * delta, diff.end()); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + std::pair<int, int> local_res(INT_MAX, 0); + if (!local_input_.empty()) { + local_res = *std::min_element(local_input_.begin(), local_input_.end()); + } + reduce(world, local_res, res, boost::mpi::minimum<std::pair<int, int>>(), 0); + return true; +} + +bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + 
reinterpret_cast(taskData->outputs[0])[0] = input_[res.second]; + reinterpret_cast(taskData->outputs[0])[1] = input_[res.second + 1]; + } + return true; +} diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..3c0c892be20 --- /dev/null +++ b/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,153 @@ +#include + +#include +#include + +#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_for_empty_vector) { + // Create data + std::vector in; + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_10) { + const int count = 10; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_20) { + const int count = 20; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_50) { + const int count = 50; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + 
sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_70) { + const int count = 70; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_100) { + const int count = 100; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out); +} \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..12606c9cde9 --- /dev/null +++ b/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sozonov_i_nearest_neighbor_elements_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +} // namespace sozonov_i_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..1aeffb7978d --- /dev/null +++ b/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector 
out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out); +} + +TEST(sozonov_i_nearest_neighbor_elements_seq, test_task_run) { + const int count = 10000000; + + // Create data + std::vector in(count); + std::iota(in.begin(), in.end(), 0); + in[0] = 1; + std::vector out(2, 0); + std::vector ans(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out); +} \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..95677138ddd --- /dev/null +++ b/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,43 @@ +#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of input and output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; +} + +bool 
sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::run() { + internal_order_test(); + int min = INT_MAX; + for (size_t i = 0; i < input_.size() - 1; ++i) { + if (abs(input_[i + 1] - input_[i]) < min) { + min = abs(input_[i + 1] - input_[i]); + res = i; + } + } + return true; +} + +bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int *>(taskData->outputs[0])[0] = input_[res]; + reinterpret_cast<int *>(taskData->outputs[0])[1] = input_[res + 1]; + return true; +} \ No newline at end of file From e3fb1731463cdde034e9ee7348e30deff1455179 Mon Sep 17 00:00:00 2001 From: erty1909 Date: Mon, 4 Nov 2024 03:55:18 +0300 Subject: [PATCH 076/155] Matyunina Alexandra. Task 1. Variant 2. Computing the average value of vector elements (#98) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit seq: The input is a vector of elements, and we must find the average value of its elements. We add the value of every element of the vector into a single variable and then divide it by the total number of elements; this yields the average value. mpi: The single large vector of values is split into several segments, and these segments are handed out to the different processes; the segment size is derived from the number of processes we run on. Afterwards, all the partial values received from the different processes are summed in the root process, and there the total is divided by the size of the whole vector. A minimal sketch of this scheme is shown below. 
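As above, a short self-contained Boost.MPI sketch of this averaging scheme, independent of the ppc::core classes; the identifiers (data, local, global_sum) and the decision to keep the division remainder on the root are assumptions of the sketch, not code from the patch.

// Sketch (assumptions noted above): average of a vector's elements via MPI.
#include <boost/mpi.hpp>

#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> data;  // lives only on the root
  int total = 0;          // total element count, broadcast to everyone
  if (world.rank() == 0) {
    data.resize(999);
    std::iota(data.begin(), data.end(), 0);  // 0..998
    total = static_cast<int>(data.size());
  }
  boost::mpi::broadcast(world, total, 0);

  const int delta = total / world.size();  // segment size per process
  std::vector<int> local(delta);
  if (world.rank() == 0) {
    for (int proc = 1; proc < world.size(); ++proc)
      world.send(proc, 0, data.data() + proc * delta, delta);
    local.assign(data.begin(), data.begin() + delta);
    // The root also keeps the remainder, so every element is counted.
    local.insert(local.end(), data.begin() + world.size() * delta, data.end());
  } else {
    world.recv(0, 0, local.data(), delta);
  }

  const int local_sum = std::accumulate(local.begin(), local.end(), 0);
  int global_sum = 0;
  boost::mpi::reduce(world, local_sum, global_sum, std::plus<int>(), 0);  // sum on root

  if (world.rank() == 0)
    std::cout << "average = " << global_sum / total << '\n';  // integer average, as in the patch
}

With 999 elements 0..998 the sum is 498501 and the printed average is exactly 499, which is the value the perf test below asserts.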
--- .../func_tests/main.cpp | 168 ++++++++++++++++++ .../include/ops_mpi.hpp | 45 +++++ .../perf_tests/main.cpp | 90 ++++++++++ .../src/ops_mpi.cpp | 103 +++++++++++ .../func_tests/main.cpp | 142 +++++++++++++++ .../include/ops_seq.hpp | 26 +++ .../perf_tests/main.cpp | 89 ++++++++++ .../src/ops_seq.cpp | 33 ++++ 8 files changed, 696 insertions(+) create mode 100644 tasks/mpi/matyunina_a_average_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/matyunina_a_average_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/matyunina_a_average_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/matyunina_a_average_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/matyunina_a_average_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/matyunina_a_average_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/matyunina_a_average_of_vector_elements/func_tests/main.cpp b/tasks/mpi/matyunina_a_average_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..5c98e86c71a --- /dev/null +++ b/tasks/mpi/matyunina_a_average_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,168 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp" + +TEST(matyunina_a_average_of_vector_elements_mpi, CalculateAverageOfVectorElements) { + boost::mpi::communicator world; + std::vector global_vec(9); + std::vector average_value(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (int i = 0; i < 9; i++) { + global_vec[i] = i; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(average_value.data())); + taskDataPar->outputs_count.emplace_back(average_value.size()); + } + + matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + // Create Task + matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], average_value[0]); + ASSERT_EQ(4, average_value[0]); + } +} + +TEST(matyunina_a_average_of_vector_elements_mpi, CalculateAverageOfVectorElements_2) { + boost::mpi::communicator world; + std::vector global_vec(99); + std::vector average_value(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (int i = 0; i < 99; i++) { + global_vec[i] = i; + } + 
taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(average_value.data())); + taskDataPar->outputs_count.emplace_back(average_value.size()); + } + + matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + // Create Task + matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], average_value[0]); + } +} + +TEST(matyunina_a_average_of_vector_elements_mpi, CalculateAverageOfVectorElements_3) { + boost::mpi::communicator world; + std::vector global_vec(999); + std::vector average_value(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (int i = 0; i < 999; i++) { + global_vec[i] = i; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(average_value.data())); + taskDataPar->outputs_count.emplace_back(average_value.size()); + } + + matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + // Create Task + matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], average_value[0]); + } +} + +TEST(matyunina_a_average_of_vector_elements_mpi, Test_Standart) { + boost::mpi::communicator world; + std::vector global_vec(6); + std::vector average_value(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_vec = {-1, 2, -3, 4, 5, -1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(average_value.data())); + 
taskDataPar->outputs_count.emplace_back(average_value.size()); + } + matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + // Create Task + matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], average_value[0]); + } +} diff --git a/tasks/mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..66be78e98e3 --- /dev/null +++ b/tasks/mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace matyunina_a_average_of_vector_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace matyunina_a_average_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/matyunina_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/matyunina_a_average_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..e8fc8e7ea5f --- /dev/null +++ b/tasks/mpi/matyunina_a_average_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp" + +TEST(matyunina_a_average_of_vector_elements_mpi, test_1000) { + boost::mpi::communicator world; + std::vector global_vec(999); + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (int i = 0; i < 999; i++) { + global_vec[i] = i; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + 
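+ // With inputs 0..998 the expected average is exact even under integer
+ // division: sum = 998 * 999 / 2 = 498501 and 498501 / 999 = 499, which is
+ // the value ASSERT_EQ checks after the perf run. The 99-element variant is
+ // exact as well: (98 * 99 / 2) / 99 = 4851 / 99 = 49.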
auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(499, global_sum[0]); + } +} + +TEST(matyunina_a_average_of_vector_elements_mpi, test_100) { + boost::mpi::communicator world; + std::vector global_vec(99); + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (int i = 0; i < 99; i++) { + global_vec[i] = i; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(49, global_sum[0]); + } +} diff --git a/tasks/mpi/matyunina_a_average_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/matyunina_a_average_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..fd45f7353e1 --- /dev/null +++ b/tasks/mpi/matyunina_a_average_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,103 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/matyunina_a_average_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + res_ = 0; + return true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + res_ = std::accumulate(input_.begin(), input_.end(), 0); + res_ /= static_cast(input_.size()); + return true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return 
true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + res_ = 0; + return true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + unsigned int remainder = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + delta * proc + remainder, delta); + } + } + local_input_ = std::vector(delta); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta + remainder); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + int local_res = 0; + for (size_t i = 0; i < local_input_.size(); i++) { + local_res += local_input_[i]; + } + + std::vector all; + boost::mpi::gather(world, local_res, all, 0); + + if (world.rank() == 0) { + for (int res : all) { + res_ += res; + } + res_ /= static_cast(input_.size()); + } + return true; +} + +bool matyunina_a_average_of_vector_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/matyunina_a_average_of_vector_elements/func_tests/main.cpp b/tasks/seq/matyunina_a_average_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..151b18936c6 --- /dev/null +++ b/tasks/seq/matyunina_a_average_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,142 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp" + +TEST(matyunina_a_average_of_vector_elements_seq, Test_10_1) { + const int count = 10; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, Test_20_2) { + const int count = 20; + + // Create data + std::vector in(count, 2); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + 
matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(2, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, Test_50_3) { + const int count = 50; + + // Create data + std::vector in(count, 3); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(3, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, Test_70_4) { + const int count = 70; + + // Create data + std::vector in(count, 4); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(4, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, Test_100_5) { + const int count = 100; + + // Create data + std::vector in(count, 5); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(5, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, Test_3) { + // Create data + std::vector in{1, 2, 3, 4, 5}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + matyunina_a_average_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(3, out[0]); +} diff --git a/tasks/seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp b/tasks/seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp new file mode 100644 
index 00000000000..3f7d9ac4271 --- /dev/null +++ b/tasks/seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace matyunina_a_average_of_vector_elements_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +} // namespace matyunina_a_average_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/matyunina_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/seq/matyunina_a_average_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..cea552c3424 --- /dev/null +++ b/tasks/seq/matyunina_a_average_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp" + +TEST(matyunina_a_average_of_vector_elements_seq, test_100) { + const int count = 99; + + // Create data + std::vector in(count); + for (int i = 0; i < count; i++) { + in[i] = i; + } + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(49, out[0]); +} + +TEST(matyunina_a_average_of_vector_elements_seq, test_1000) { + const int count = 999; + + // Create data + std::vector in(count); + for (int i = 0; i < count; i++) { + in[i] = i; + } + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf 
results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(499, out[0]); +} diff --git a/tasks/seq/matyunina_a_average_of_vector_elements/src/ops_seq.cpp b/tasks/seq/matyunina_a_average_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..c23923357d8 --- /dev/null +++ b/tasks/seq/matyunina_a_average_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,33 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/matyunina_a_average_of_vector_elements/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool matyunina_a_average_of_vector_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + res_ = 0; + return true; +} + +bool matyunina_a_average_of_vector_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool matyunina_a_average_of_vector_elements_seq::TestTaskSequential::run() { + internal_order_test(); + res_ = std::accumulate(input_.begin(), input_.end(), 0); + res_ /= static_cast(input_.size()); + return true; +} + +bool matyunina_a_average_of_vector_elements_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From b0e5dc4dd46e3d7cf9d3f0c4d445dcc37360570b Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 4 Nov 2024 08:59:28 +0800 Subject: [PATCH 077/155] =?UTF-8?q?Revert=20"=D0=A7=D0=B8=D0=B6=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2016.=20=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4?= =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81=D0=B8=D0=BC?= =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87?= =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE?= =?UTF-8?q?=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8?= =?UTF-8?q?=D1=86=D1=8B"=20(#178)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#44 image --- .../func_tests/main.cpp | 355 ------------------ .../include/ops_mpi.hpp | 49 --- .../perf_tests/main.cpp | 100 ----- .../src/ops_mpi.cpp | 156 -------- .../func_tests/main.cpp | 146 ------- .../include/ops_seq.hpp | 26 -- .../perf_tests/main.cpp | 96 ----- .../src/ops_seq.cpp | 63 ---- 8 files changed, 991 deletions(-) delete mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp delete mode 100644 tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp delete mode 100644 
tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp deleted file mode 100644 index 3883c3ea6ae..00000000000 --- a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include -#include -#include - -#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp" - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - int val = gen() % 200 - 100; - if (val >= 0) { - vec[i] = val; - } - } - return vec; -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Zero_Columns) { - boost::mpi::communicator world; - - int cols = 0; - int rows = 0; - - std::vector matrix; - std::vector res_par(cols, 0); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - if (world.rank() == 0) { - ASSERT_FALSE(testMpiTaskParallel.validation()); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Empty_Matrix) { - boost::mpi::communicator world; - - int cols = 5; - int rows = 5; - - std::vector matrix; - std::vector res_par(cols, 0); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - if (world.rank() == 0) { - ASSERT_FALSE(testMpiTaskParallel.validation()); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max1) { - boost::mpi::communicator world; - - int cols = 15; - int rows = 5; - - std::vector matrix; - std::vector res_par(cols, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - 
if (world.rank() == 0) { - // Create data - std::vector res_seq(cols, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->outputs_count.emplace_back(res_seq.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(res_seq, res_par); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max2) { - boost::mpi::communicator world; - - int cols = 50; - int rows = 50; - - std::vector matrix; - std::vector res_par(cols, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector res_seq(cols, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->outputs_count.emplace_back(res_seq.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(res_seq, res_par); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max3) { - boost::mpi::communicator world; - - int cols = 50; - int rows = 100; - - std::vector matrix; - std::vector res_par(cols, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - 
ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector res_seq(cols, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->outputs_count.emplace_back(res_seq.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(res_seq, res_par); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max4) { - boost::mpi::communicator world; - - int cols = 70; - int rows = 50; - - std::vector matrix; - std::vector res_par(cols, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector res_seq(cols, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->outputs_count.emplace_back(res_seq.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(res_seq, res_par); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_mpi, Test_Max5) { - boost::mpi::communicator world; - - int cols = 300; - int rows = 150; - - std::vector matrix; - std::vector res_par(cols, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = cols * rows; - matrix = getRandomVector(count_size_vector); - - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); - 
taskDataPar->outputs_count.emplace_back(res_par.size()); - } - - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector res_seq(cols, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->outputs_count.emplace_back(res_seq.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(res_seq, res_par); - } -} \ No newline at end of file diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp deleted file mode 100644 index b8b41a0db71..00000000000 --- a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace chizhov_m_max_values_by_columns_matrix_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - std::vector res_{}; - int cols{}; - int rows{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - std::vector res_{}; - int cols{}; - int rows{}; - boost::mpi::communicator world; -}; - -} // namespace chizhov_m_max_values_by_columns_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp deleted file mode 100644 index 10362470263..00000000000 --- a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp" - -TEST(chizhov_m_max_values_by_columns_matrix_perf_test, test_pipeline_run) { - int rows = 1000; - int columns = 4000; - boost::mpi::communicator world; - std::vector matrix; - std::vector max_vec_mpi(columns, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - matrix = std::vector(rows * columns, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - 
taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(columns); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(max_vec_mpi.data())); - taskDataPar->outputs_count.emplace_back(max_vec_mpi.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < max_vec_mpi.size(); i++) { - EXPECT_EQ(1, max_vec_mpi[0]); - } - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_perf_test, test_task_run) { - int rows = 1000; - int columns = 4000; - boost::mpi::communicator world; - std::vector matrix; - std::vector max_vec_mpi(columns, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - matrix = std::vector(rows * columns, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataPar->inputs_count.emplace_back(matrix.size()); - taskDataPar->inputs_count.emplace_back(columns); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->outputs.emplace_back(reinterpret_cast(max_vec_mpi.data())); - taskDataPar->outputs_count.emplace_back(max_vec_mpi.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < max_vec_mpi.size(); i++) { - EXPECT_EQ(1, max_vec_mpi[0]); - } - } -} \ No newline at end of file diff --git a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp b/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp deleted file mode 100644 index 29889a93cb1..00000000000 --- a/tasks/mpi/chizhov_m_max_values_by_columns_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/chizhov_m_max_values_by_columns_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - - cols = taskData->inputs_count[1]; - rows = taskData->inputs_count[2]; - - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - - for (unsigned i = 0; i < 
taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - - res_ = std::vector(cols, 0); - - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - if (taskData->inputs_count[1] == 0 || taskData->inputs_count[2] == 0) { - return false; - } - if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) { - return false; - } - if (taskData->inputs_count[1] != taskData->outputs_count[0]) { - return false; - } - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - - for (int j = 0; j < cols; j++) { - int maxElement = input_[j]; - for (int i = 1; i < rows; i++) { - if (input_[i * cols + j] > maxElement) { - maxElement = input_[i * cols + j]; - } - } - res_[j] = maxElement; - } - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - for (int i = 0; i < cols; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res_[i]; - } - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - if (world.rank() == 0) { - cols = taskData->inputs_count[1]; - rows = taskData->inputs_count[2]; - } - - if (world.rank() == 0) { - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - } else { - input_ = std::vector(cols * rows, 0); - } - - res_ = std::vector(cols, 0); - - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - if (taskData->inputs_count[1] == 0 || taskData->inputs_count[2] == 0) { - return false; - } - if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) { - return false; - } - if (taskData->inputs_count[1] != taskData->outputs_count[0]) { - return false; - } - } - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - broadcast(world, cols, 0); - broadcast(world, rows, 0); - - if (world.rank() != 0) { - input_ = std::vector(cols * rows, 0); - } - broadcast(world, input_.data(), cols * rows, 0); - - int delta = cols / world.size(); - int extra = cols % world.size(); - if (extra != 0) { - delta += 1; - } - int startCol = delta * world.rank(); - int lastCol = std::min(cols, delta * (world.rank() + 1)); - std::vector localMax; - for (int j = startCol; j < lastCol; j++) { - int maxElem = input_[j]; - for (int i = 1; i < rows; i++) { - int coor = i * cols + j; - if (input_[coor] > maxElem) { - maxElem = input_[coor]; - } - } - localMax.push_back(maxElem); - } - localMax.resize(delta); - if (world.rank() == 0) { - std::vector globalRes(cols + delta * world.size()); - std::vector sizes(world.size(), delta); - boost::mpi::gatherv(world, localMax.data(), localMax.size(), globalRes.data(), sizes, 0); - globalRes.resize(cols); - res_ = globalRes; - } else { - boost::mpi::gatherv(world, localMax.data(), localMax.size(), 0); - } - return true; -} - -bool chizhov_m_max_values_by_columns_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - for (int i = 0; i < cols; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res_[i]; - } - } - return true; -} \ No newline at end of file diff --git 
a/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp deleted file mode 100644 index 2b48e3d2b5b..00000000000 --- a/tasks/seq/chizhov_m_max_values_by_columns_matrix/func_tests/main.cpp +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp" - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Zero_Columns) { - std::shared_ptr taskDataSeq = std::make_shared(); - int columns = 0; - std::vector matrix; - std::vector res_seq(columns, 0); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->inputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->inputs_count.emplace_back(res_seq.size()); - - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - ASSERT_FALSE(testTaskSequential.validation()); -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Empty_Matrix) { - std::shared_ptr taskDataSeq = std::make_shared(); - int columns = 3; - std::vector matrix; - std::vector res_seq(columns, 0); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->inputs.emplace_back(reinterpret_cast(res_seq.data())); - taskDataSeq->inputs_count.emplace_back(res_seq.size()); - - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - ASSERT_FALSE(testTaskSequential.validation()); -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_3_Columns) { - int columns = 3; - - // Create data - std::vector matrix = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - std::vector max(columns, 0); - std::vector result = {7, 8, 9}; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); - taskDataSeq->outputs_count.emplace_back(max.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(result, max); -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_4_Columns) { - int columns = 4; - - // Create data - std::vector matrix = {4, 7, 5, 3, 8, 10, 12, 4, 2, 15, 3, 27}; - std::vector max(columns, 0); - std::vector result = {8, 15, 12, 27}; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); - 
taskDataSeq->outputs_count.emplace_back(max.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(result, max); -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_5_Columns) { - int columns = 5; - - // Create data - std::vector matrix = {4, 7, 5, 3, 8, 10, 12, 4, 2, 6, 2, 1, 15, 3, 27}; - std::vector max(columns, 0); - std::vector result = {10, 12, 15, 3, 27}; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); - taskDataSeq->outputs_count.emplace_back(max.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(result, max); -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, Test_Max_6_Columns) { - int columns = 6; - - // Create data - std::vector matrix = {9, 20, 3, 4, 7, 5, 3, 8, 10, 12, 4, 2, 6, 2, 1, 15, 3, 27}; - std::vector max(columns, 0); - std::vector result = {9, 20, 10, 15, 7, 27}; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(max.data())); - taskDataSeq->outputs_count.emplace_back(max.size()); - - // Create Task - chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(result, max); -} \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp deleted file mode 100644 index 75bcd4a5f02..00000000000 --- a/tasks/seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace chizhov_m_max_values_by_columns_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int cols{}; - int rows{}; - std::vector input_; - std::vector res_; -}; - -} // namespace chizhov_m_max_values_by_columns_matrix_seq \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp deleted file mode 100644 index 
856bcbf19e0..00000000000 --- a/tasks/seq/chizhov_m_max_values_by_columns_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp" - -TEST(chizhov_m_max_values_by_columns_matrix_seq, test_pipeline_run) { - int columns = 2000; - int rows = 5000; - - // Create data - std::vector matrix(rows * columns, 1); - std::vector result(columns, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(result.data())); - taskDataSeq->outputs_count.emplace_back(result.size()); - - // Create Task - auto testTaskSequential = - std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (size_t i = 0; i < result.size(); i++) { - EXPECT_EQ(1, result[0]); - } -} - -TEST(chizhov_m_max_values_by_columns_matrix_seq, test_task_run) { - int rows; - int columns; - - // Create data - rows = 5000; - columns = 2000; - std::vector matrix(rows * columns, 1); - std::vector res(columns, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - - taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); - taskDataSeq->inputs_count.emplace_back(matrix.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&columns)); - taskDataSeq->inputs_count.emplace_back((size_t)1); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); - taskDataSeq->outputs_count.emplace_back(res.size()); - - // Create Task - auto testTaskSequential = - std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (size_t i = 0; i < res.size(); i++) { - EXPECT_EQ(1, res[0]); - } -} \ No newline at end of file diff --git a/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp b/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp deleted file mode 100644 index e5c41912960..00000000000 --- 
a/tasks/seq/chizhov_m_max_values_by_columns_matrix/src/ops_seq.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2024 Nesterov Alexander
-#include "seq/chizhov_m_max_values_by_columns_matrix/include/ops_seq.hpp"
-
-#include
-#include
-#include
-
-bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::pre_processing() {
-  internal_order_test();
-  // Init vectors
-  cols = (int)*taskData->inputs[1];
-  rows = (int)(taskData->inputs_count[0] / cols);
-  input_ = std::vector<int>(taskData->inputs_count[0]);
-  auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
-
-  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
-    input_[i] = tmp_ptr[i];
-  }
-
-  res_ = std::vector<int>(cols, 0);
-
-  return true;
-}
-
-bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::validation() {
-  internal_order_test();
-  if ((int)*taskData->inputs[1] == 0) {
-    return false;
-  }
-  if (taskData->inputs.empty() || taskData->inputs_count[0] <= 0) {
-    return false;
-  }
-  if (*taskData->inputs[1] != taskData->outputs_count[0]) {
-    return false;
-  }
-  return true;
-}
-
-bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::run() {
-  internal_order_test();
-
-  for (int j = 0; j < cols; j++) {
-    int maxElement = input_[j];
-    for (int i = 1; i < rows; i++) {
-      if (input_[i * cols + j] > maxElement) {
-        maxElement = input_[i * cols + j];
-      }
-    }
-    res_[j] = maxElement;
-  }
-
-  return true;
-}
-
-bool chizhov_m_max_values_by_columns_matrix_seq::TestTaskSequential::post_processing() {
-  internal_order_test();
-
-  for (int j = 0; j < cols; j++) {
-    reinterpret_cast<int*>(taskData->outputs[0])[j] = res_[j];
-  }
-
-  return true;
-}
\ No newline at end of file

From 228dc19df45acf555954b5f60764f37c48a1b8ce Mon Sep 17 00:00:00 2001
From: KorotinEgor <121280329+KorotinEgor@users.noreply.github.com>
Date: Mon, 4 Nov 2024 03:59:35 +0300
Subject: [PATCH 078/155] =?UTF-8?q?=D0=9A=D0=BE=D1=80=D0=BE=D1=82=D0=B8?=
 =?UTF-8?q?=D0=BD=20=D0=95=D0=B3=D0=BE=D1=80.=20=D0=97=D0=B0=D0=B4=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?=
 =?UTF-8?q?=2014.=20=D0=9C=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD?=
 =?UTF-8?q?=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B5?=
 =?UTF-8?q?=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20?=
 =?UTF-8?q?=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#100)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

seq: The matrix is stored in memory as a one-dimensional array. To find its
minimum element, a temporary variable is created, initialized to the top-left
element of the matrix, and then compared in turn with every remaining element.
Whenever the temporary variable turns out to be greater than the element under
consideration, it takes on that element's value.

mpi: The matrix is represented the same way as in seq. The root process splits
it into n roughly equal segments, where n is the number of processes; each
process then independently finds the minimum element of its own segment using
the algorithm described for seq, after which the root process gathers the
minima produced by all processes and selects the smallest of them.
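For reference, the scheme described above condenses to a few lines with
Boost.MPI's collectives. The following is a minimal, self-contained sketch
rather than the patch's actual implementation: for brevity it broadcasts the
whole flattened matrix and lets every rank scan only its own slice, instead of
the point-to-point segment sends described above, and the toy matrix and the
names local_min/global_min are purely illustrative.

#include <boost/mpi.hpp>

#include <algorithm>
#include <cstddef>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Toy 2x4 matrix, flattened row by row; only the root owns the data.
  std::vector<int> m;
  if (world.rank() == 0) m = {5, -3, 7, 2, 9, -8, 1, 4};
  boost::mpi::broadcast(world, m, 0);

  // Roughly equal pieces, one per process; the last rank also takes the
  // remainder (assumes world.size() <= m.size(), enough for a demo).
  std::size_t delta = m.size() / world.size();
  std::size_t begin = delta * world.rank();
  std::size_t end = (world.rank() + 1 == world.size()) ? m.size() : begin + delta;

  // Each process finds the minimum of its segment, as in the seq algorithm.
  int local_min = *std::min_element(m.begin() + begin, m.begin() + end);

  // The root collects the per-process minima and keeps the smallest one.
  int global_min = 0;
  boost::mpi::reduce(world, local_min, global_min, boost::mpi::minimum<int>(), 0);

  if (world.rank() == 0) {
    // global_min == -8 for the toy matrix above.
  }
  return 0;
}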
--------- Co-authored-by: korotin_e --- .../func_tests/main.cpp | 149 ++++++++++++++++++ .../include/ops_mpi.hpp | 44 ++++++ .../perf_tests/main.cpp | 90 +++++++++++ .../korotin_e_min_val_matrix/src/ops_mpi.cpp | 119 ++++++++++++++ .../func_tests/main.cpp | 58 +++++++ .../include/ops_seq.hpp | 24 +++ .../perf_tests/main.cpp | 83 ++++++++++ .../korotin_e_min_val_matrix/src/ops_seq.cpp | 39 +++++ 8 files changed, 606 insertions(+) create mode 100644 tasks/mpi/korotin_e_min_val_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/korotin_e_min_val_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/korotin_e_min_val_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/korotin_e_min_val_matrix/func_tests/main.cpp create mode 100644 tasks/seq/korotin_e_min_val_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/korotin_e_min_val_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/korotin_e_min_val_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/korotin_e_min_val_matrix/func_tests/main.cpp b/tasks/mpi/korotin_e_min_val_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..3dcb9d3006b --- /dev/null +++ b/tasks/mpi/korotin_e_min_val_matrix/func_tests/main.cpp @@ -0,0 +1,149 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp" + +namespace korotin_e_min_val_matrix_mpi { + +std::vector getRandomMatrix(const unsigned rows, const unsigned columns, double scal) { + if (rows == 0 || columns == 0) { + throw std::invalid_argument("Can't creaate matrix with 0 rows or columns"); + } + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector matrix(rows * columns); + for (unsigned i = 0; i < rows * columns; i++) { + matrix[i] = gen() / scal; + } + return matrix; +} + +} // namespace korotin_e_min_val_matrix_mpi + +TEST(korotin_e_min_val_matrix, cant_create_zeroed_matrix) { + boost::mpi::communicator world; + + if (world.rank() == 0) { + ASSERT_ANY_THROW(korotin_e_min_val_matrix_mpi::getRandomMatrix(0, 10, 100)); + ASSERT_ANY_THROW(korotin_e_min_val_matrix_mpi::getRandomMatrix(10, 0, 100)); + ASSERT_ANY_THROW(korotin_e_min_val_matrix_mpi::getRandomMatrix(0, 0, 100)); + } +} + +TEST(korotin_e_min_val_matrix, minval_is_correct) { + boost::mpi::communicator world; + std::vector matrix; + std::vector min_val(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const unsigned M = 30; + const unsigned N = 30; + matrix = korotin_e_min_val_matrix_mpi::getRandomMatrix(M, N, 100); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataPar->outputs_count.emplace_back(min_val.size()); + } + + korotin_e_min_val_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + + 
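+ // Cross-check: rank 0 feeds the same matrix to the sequential task and
+ // uses its result as the reference value; ASSERT_DOUBLE_EQ below then
+ // compares the parallel minimum against the sequentially computed one.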
korotin_e_min_val_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(reference[0], min_val[0]); + } +} + +TEST(korotin_e_min_val_matrix, matrix_minval_with_prime_rows_and_columns) { + boost::mpi::communicator world; + std::vector matrix; + std::vector min_val(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const unsigned M = 29; + const unsigned N = 31; + matrix = korotin_e_min_val_matrix_mpi::getRandomMatrix(M, N, 100); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataPar->outputs_count.emplace_back(min_val.size()); + } + + korotin_e_min_val_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + + korotin_e_min_val_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_DOUBLE_EQ(reference[0], min_val[0]); + } +} + +TEST(korotin_e_min_val_matrix, minval_in_1_1_matrix) { + boost::mpi::communicator world; + std::vector matrix; + std::vector min_val(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const unsigned M = 1; + const unsigned N = 1; + matrix = korotin_e_min_val_matrix_mpi::getRandomMatrix(M, N, 100); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataPar->outputs_count.emplace_back(min_val.size()); + } + + korotin_e_min_val_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_DOUBLE_EQ(matrix[0], min_val[0]); + } +} diff --git a/tasks/mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp b/tasks/mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..2e7f7caa78e --- /dev/null +++ b/tasks/mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korotin_e_min_val_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool 
post_processing() override; + + private: + std::vector input_; + double res = 0.0; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + double res = 0.0; + boost::mpi::communicator world; +}; + +} // namespace korotin_e_min_val_matrix_mpi diff --git a/tasks/mpi/korotin_e_min_val_matrix/perf_tests/main.cpp b/tasks/mpi/korotin_e_min_val_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..fd7b95da44e --- /dev/null +++ b/tasks/mpi/korotin_e_min_val_matrix/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp" + +TEST(korotin_e_min_val_matrix, test_pipeline_run) { + boost::mpi::communicator world; + std::vector matrix; + std::vector min_val(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int rows; + int columns; + if (world.rank() == 0) { + rows = columns = 60; + matrix = std::vector(rows * columns, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataPar->outputs_count.emplace_back(min_val.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(1, min_val[0]); + } +} + +TEST(korotin_e_min_val_matrix, test_task_run) { + boost::mpi::communicator world; + std::vector matrix; + std::vector min_val(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int rows; + int columns; + if (world.rank() == 0) { + rows = columns = 60; + matrix = std::vector(rows * columns, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataPar->outputs_count.emplace_back(min_val.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + 
perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(1, min_val[0]); + } +} diff --git a/tasks/mpi/korotin_e_min_val_matrix/src/ops_mpi.cpp b/tasks/mpi/korotin_e_min_val_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..c4b30587173 --- /dev/null +++ b/tasks/mpi/korotin_e_min_val_matrix/src/ops_mpi.cpp @@ -0,0 +1,119 @@ +#include "mpi/korotin_e_min_val_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool korotin_e_min_val_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* start = reinterpret_cast(taskData->inputs[0]); + std::copy(start, start + taskData->inputs_count[0], input_.begin()); + res = 0.0; + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + res = input_[0]; + for (unsigned i = 1; i < taskData->inputs_count[0]; i++) { + if (input_[i] < res) res = input_[i]; + } + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* start = reinterpret_cast(taskData->inputs[0]); + std::copy(start, start + taskData->inputs_count[0], input_.begin()); + } + res = 0.0; + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + int remainder = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % world.size(); + } + broadcast(world, delta, 0); + broadcast(world, remainder, 0); + + if (world.rank() == 0) { + int counter = 1; + for (int proc = 1; proc < world.size(); proc++) { + if (counter < remainder) { + world.send(proc, 0, input_.data() + proc * delta + counter, delta + 1); + counter++; + } else + world.send(proc, 0, input_.data() + proc * delta + remainder, delta); + } + } + + if (world.rank() < remainder) { + local_input_ = std::vector(delta + 1); + } else + local_input_ = std::vector(delta); + + if (world.rank() == 0) { + if (remainder > 0) + local_input_ = std::vector(input_.begin(), input_.begin() + delta + 1); + else + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + if (world.rank() < remainder) { + world.recv(0, 0, local_input_.data(), delta + 1); + } else + world.recv(0, 0, local_input_.data(), delta); + } + double local_res; + + if (local_input_.empty()) + local_res = INFINITY; + else { + local_res = local_input_[0]; + for (std::vector::size_type i = 1; i < local_input_.size(); i++) { + if (local_input_[i] < local_res) local_res = local_input_[i]; + } + } + + reduce(world, local_res, res, boost::mpi::minimum(), 0); + + return true; +} + +bool korotin_e_min_val_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + 
if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/korotin_e_min_val_matrix/func_tests/main.cpp b/tasks/seq/korotin_e_min_val_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..9f622048b6e --- /dev/null +++ b/tasks/seq/korotin_e_min_val_matrix/func_tests/main.cpp @@ -0,0 +1,58 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/korotin_e_min_val_matrix/include/ops_seq.hpp" + +namespace korotin_e_min_val_matrix_seq { + +std::vector getRandomMatrix(const unsigned rows, const unsigned columns, double scal) { + if (rows == 0 || columns == 0) { + throw std::invalid_argument("Can't create matrix with 0 rows or columns"); + } + + std::random_device dev; + std::mt19937 gen(dev()); + std::vector matrix(rows * columns); + for (unsigned i = 0; i < rows * columns; i++) { + matrix[i] = gen() / scal; + } + return matrix; +} + +} // namespace korotin_e_min_val_matrix_seq + +TEST(korotin_e_min_val_matrix_seq, test_matrix_0) { + ASSERT_ANY_THROW(korotin_e_min_val_matrix_seq::getRandomMatrix(0, 10, 100)); + ASSERT_ANY_THROW(korotin_e_min_val_matrix_seq::getRandomMatrix(10, 0, 100)); + ASSERT_ANY_THROW(korotin_e_min_val_matrix_seq::getRandomMatrix(0, 0, 100)); +} + +TEST(korotin_e_min_val_matrix_seq, test_matrix_5_5) { + const unsigned rows = 5; + const unsigned columns = 5; + double res; + + // Create data + std::vector matrix; + std::vector min_val(1, -5); + + matrix = korotin_e_min_val_matrix_seq::getRandomMatrix(rows, columns, 100.0); + res = *std::min_element(matrix.begin(), matrix.end()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataSeq->outputs_count.emplace_back(min_val.size()); + + // Create Task + korotin_e_min_val_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_DOUBLE_EQ(res, min_val[0]); +} diff --git a/tasks/seq/korotin_e_min_val_matrix/include/ops_seq.hpp b/tasks/seq/korotin_e_min_val_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..d5092ef8c4d --- /dev/null +++ b/tasks/seq/korotin_e_min_val_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace korotin_e_min_val_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + double res{}; +}; + +} // namespace korotin_e_min_val_matrix_seq diff --git a/tasks/seq/korotin_e_min_val_matrix/perf_tests/main.cpp b/tasks/seq/korotin_e_min_val_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..4c1868c10e0 --- /dev/null +++ b/tasks/seq/korotin_e_min_val_matrix/perf_tests/main.cpp @@ -0,0 +1,83 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korotin_e_min_val_matrix/include/ops_seq.hpp" + +TEST(korotin_e_min_val_matrix_seq, test_pipeline_run) { + const
unsigned rows = 50; + const unsigned columns = 50; + + // Create data + std::vector matrix(rows * columns, 1); + std::vector min_val(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataSeq->outputs_count.emplace_back(min_val.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(1, min_val[0]); +} + +TEST(korotin_e_min_val_matrix_seq, test_task_run) { + const unsigned rows = 50; + const unsigned columns = 50; + + // Create data + std::vector matrix(rows * columns, 1); + std::vector min_val(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_val.data())); + taskDataSeq->outputs_count.emplace_back(min_val.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_DOUBLE_EQ(1, min_val[0]); +} diff --git a/tasks/seq/korotin_e_min_val_matrix/src/ops_seq.cpp b/tasks/seq/korotin_e_min_val_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..501eb9a7b1a --- /dev/null +++ b/tasks/seq/korotin_e_min_val_matrix/src/ops_seq.cpp @@ -0,0 +1,39 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/korotin_e_min_val_matrix/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool korotin_e_min_val_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + auto* start = reinterpret_cast(taskData->inputs[0]); + std::copy(start, start + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = 0.0; + return true; +} + +bool korotin_e_min_val_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool 
korotin_e_min_val_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + res = input_[0]; + for (unsigned i = 1; i < taskData->inputs_count[0]; i++) { + if (input_[i] < res) res = input_[i]; + } + return true; +} + +bool korotin_e_min_val_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From d269f287f112d55773996ad3d8fe7981518a95ef Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 4 Nov 2024 09:00:51 +0800 Subject: [PATCH 079/155] =?UTF-8?q?Revert=20"=D0=A1=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=D1=8C=D0=B5=D0=B2=20=D0=94=D0=B0=D0=BD=D0=B8=D0=BB=D0=B0?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=203.=20=D0=9C=D0=B0=D0=BA=D1=81?= =?UTF-8?q?=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD?= =?UTF-8?q?=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC?= =?UTF-8?q?=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE?= =?UTF-8?q?=D1=80=D0=B0."=20(#179)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#17 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11656113421/job/32451780480 image --- .../solovyev_d_vector_max/func_tests/main.cpp | 107 ------------ .../solovyev_d_vector_max/include/header.hpp | 48 ------ .../solovyev_d_vector_max/perf_tests/main.cpp | 98 ----------- .../mpi/solovyev_d_vector_max/src/source.cpp | 119 -------------- .../solovyev_d_vector_max/func_tests/main.cpp | 153 ------------------ .../solovyev_d_vector_max/include/header.hpp | 25 --- .../solovyev_d_vector_max/perf_tests/main.cpp | 92 ----------- .../seq/solovyev_d_vector_max/src/source.cpp | 47 ------ 8 files changed, 689 deletions(-) delete mode 100644 tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp delete mode 100644 tasks/mpi/solovyev_d_vector_max/include/header.hpp delete mode 100644 tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp delete mode 100644 tasks/mpi/solovyev_d_vector_max/src/source.cpp delete mode 100644 tasks/seq/solovyev_d_vector_max/func_tests/main.cpp delete mode 100644 tasks/seq/solovyev_d_vector_max/include/header.hpp delete mode 100644 tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp delete mode 100644 tasks/seq/solovyev_d_vector_max/src/source.cpp diff --git a/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp deleted file mode 100644 index ba20c58cfc1..00000000000 --- a/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp +++ /dev/null @@ -1,107 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "mpi/solovyev_d_vector_max/include/header.hpp" - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -TEST(solovyev_d_vector_max_mpi, Test_Max) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - std::cerr << "1 " << world.rank() << std::endl; - if (world.rank() == 0) { - const int count_size_vector = 240; - global_vec = getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - std::cerr << "2 " << world.rank() << std::endl; - solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); - ASSERT_EQ(VectorMaxMPIParallel.validation(), true); - VectorMaxMPIParallel.pre_processing(); - VectorMaxMPIParallel.run(); - VectorMaxMPIParallel.post_processing(); - std::cerr << "3 " << world.rank() << std::endl; - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); - ASSERT_EQ(VectorMaxMPISequential.validation(), true); - VectorMaxMPISequential.pre_processing(); - VectorMaxMPISequential.run(); - VectorMaxMPISequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_2) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 120; - global_vec = getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); - ASSERT_EQ(VectorMaxMPIParallel.validation(), true); - VectorMaxMPIParallel.pre_processing(); - VectorMaxMPIParallel.run(); - VectorMaxMPIParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); - ASSERT_EQ(VectorMaxMPISequential.validation(), true); - VectorMaxMPISequential.pre_processing(); - VectorMaxMPISequential.run(); - VectorMaxMPISequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} diff --git a/tasks/mpi/solovyev_d_vector_max/include/header.hpp b/tasks/mpi/solovyev_d_vector_max/include/header.hpp deleted file mode 100644 index 0b49b459cad..00000000000 --- a/tasks/mpi/solovyev_d_vector_max/include/header.hpp +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace solovyev_d_vector_max_mpi { - -int vectorMax(std::vector> v); - -class VectorMaxSequential : public ppc::core::Task { - public: - explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool 
validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector data; - int result{}; - std::string ops; -}; - -class VectorMaxMPIParallel : public ppc::core::Task { - public: - explicit VectorMaxMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector data, localData; - int result{}; - std::string ops; - boost::mpi::communicator world; -}; - -} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp deleted file mode 100644 index 36f24830de3..00000000000 --- a/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp +++ /dev/null @@ -1,98 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/solovyev_d_vector_max/include/header.hpp" - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -TEST(solovyev_d_vector_max_mpi, run_pipeline) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_res(1, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 12000000; - global_vec = getRandomVector(count_size_vector); - global_vec[count_size_vector / 2] = 1024; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(global_res.size()); - } - - auto VectorMaxMPIParallel = std::make_shared(taskDataPar); - ASSERT_EQ(VectorMaxMPIParallel->validation(), true); - VectorMaxMPIParallel->pre_processing(); - VectorMaxMPIParallel->run(); - VectorMaxMPIParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(1024, global_res[0]); - } -} - -TEST(solovyev_d_vector_max_mpi, run_task) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_res(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 12000000; - global_vec = getRandomVector(count_size_vector); - global_vec[count_size_vector / 2] = 1024; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(global_res.size()); - } - - auto VectorMaxMPIParallel = std::make_shared(taskDataPar); - ASSERT_EQ(VectorMaxMPIParallel->validation(), true); - VectorMaxMPIParallel->pre_processing(); - VectorMaxMPIParallel->run(); - 
VectorMaxMPIParallel->post_processing(); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(1024, global_res[0]); - } -} diff --git a/tasks/mpi/solovyev_d_vector_max/src/source.cpp b/tasks/mpi/solovyev_d_vector_max/src/source.cpp deleted file mode 100644 index 76213313933..00000000000 --- a/tasks/mpi/solovyev_d_vector_max/src/source.cpp +++ /dev/null @@ -1,119 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "mpi/solovyev_d_vector_max/include/header.hpp" - -using namespace std::chrono_literals; - -int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { - int m = -214748364; - for (std::string::size_type i = 0; i < v.size(); i++) { - if (v[i] > m) { - m = v[i]; - } - } - return m; -} - -bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::pre_processing() { - internal_order_test(); - - // Determine number of vector elements per process - unsigned int delta = 0; - if (world.rank() == 0) { - delta = taskData->inputs_count[0] / world.size(); - } - - // Share delta between all processes - broadcast(world, delta, 0); - - if (world.rank() == 0) { - // Convert input data to vector - int* input_ = reinterpret_cast(taskData->inputs[0]); - data = std::vector(input_, input_ + taskData->inputs_count[0]); - - // Send each of processes their portion of data - for (int process = 1; process < world.size(); process++) { - world.send(process, 0, data.data() + process * delta, delta); - } - } - - // Initialize local vector - localData = std::vector(delta); - if (world.rank() == 0) { - // Getting data directly if we in zero process - localData = std::vector(data.begin(), data.begin() + delta); - } else { - // Otherwise, recieving data - world.recv(0, 0, localData.data(), delta); - } - - // Init result value - result = 0; - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of output - return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); - } - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::run() { - internal_order_test(); - int localResult; - - // Search for maximum vector element in current process data - localResult = vectorMax(localData); - - // Search for maximum vector element using all processes data - reduce(world, localResult, result, boost::mpi::maximum(), 0); - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = result; - } - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() { - internal_order_test(); - - // Init data vector - int* input_ = reinterpret_cast(taskData->inputs[0]); - data = std::vector(input_, input_ + taskData->inputs_count[0]); - - // Init result value - result = 0; - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() { - internal_order_test(); - // Check count elements of output - return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); -} - -bool 
solovyev_d_vector_max_mpi::VectorMaxSequential::run() { - internal_order_test(); - - // Determine maximum value of data vector - result = vectorMax(data); - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = result; - return true; -} \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp deleted file mode 100644 index e05edf14e91..00000000000 --- a/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp +++ /dev/null @@ -1,153 +0,0 @@ -#include - -#include -#include - -#include "seq/solovyev_d_vector_max/include/header.hpp" - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -TEST(solovyev_d_vector_max_mpi, Test_Empty) { - // Create data - std::vector in(0, 0); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), false); -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_10) { - const int count = 10; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), true); - VectorMaxSequential.pre_processing(); - VectorMaxSequential.run(); - VectorMaxSequential.post_processing(); - ASSERT_EQ(1024, out[0]); -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_100) { - const int count = 20; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), true); - VectorMaxSequential.pre_processing(); - VectorMaxSequential.run(); - VectorMaxSequential.post_processing(); - ASSERT_EQ(1024, out[0]); -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_1000) { - const int count = 50; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - 
taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), true); - VectorMaxSequential.pre_processing(); - VectorMaxSequential.run(); - VectorMaxSequential.post_processing(); - ASSERT_EQ(1024, out[0]); -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_10000) { - const int count = 70; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), true); - VectorMaxSequential.pre_processing(); - VectorMaxSequential.run(); - VectorMaxSequential.post_processing(); - ASSERT_EQ(1024, out[0]); -} - -TEST(solovyev_d_vector_max_mpi, Test_Max_100000) { - const int count = 100; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); - ASSERT_EQ(VectorMaxSequential.validation(), true); - VectorMaxSequential.pre_processing(); - VectorMaxSequential.run(); - VectorMaxSequential.post_processing(); - ASSERT_EQ(1024, out[0]); -} diff --git a/tasks/seq/solovyev_d_vector_max/include/header.hpp b/tasks/seq/solovyev_d_vector_max/include/header.hpp deleted file mode 100644 index 712e45ed1ac..00000000000 --- a/tasks/seq/solovyev_d_vector_max/include/header.hpp +++ /dev/null @@ -1,25 +0,0 @@ - -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace solovyev_d_vector_max_mpi { -int vectorMax(std::vector> v); -class VectorMaxSequential : public ppc::core::Task { - public: - explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector data; - int result{}; - std::string ops; -}; - -} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp deleted file mode 100644 index 8323ffa5f34..00000000000 --- a/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp +++ /dev/null @@ -1,92 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/solovyev_d_vector_max/include/header.hpp" - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -TEST(solovyev_d_vector_max_mpi, test_pipeline_run) { - const int count = 12000000; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector 
out(1, 0); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(1024, out[0]); -} - -TEST(solovyev_d_vector_max_mpi, test_task_run) { - const int count = 12000000; - - // Create data - std::vector in = getRandomVector(count); - in[count / 2] = 1024; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(1024, out[0]); -} diff --git a/tasks/seq/solovyev_d_vector_max/src/source.cpp b/tasks/seq/solovyev_d_vector_max/src/source.cpp deleted file mode 100644 index 6ead459248c..00000000000 --- a/tasks/seq/solovyev_d_vector_max/src/source.cpp +++ /dev/null @@ -1,47 +0,0 @@ -#include -#include - -#include "seq/solovyev_d_vector_max/include/header.hpp" - -using namespace std::chrono_literals; - -int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { - int m = -214748364; - for (std::string::size_type i = 0; i < v.size(); i++) { - if (v[i] > m) { - m = v[i]; - } - } - return m; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() { - internal_order_test(); - - // Init data vector - int* input_ = reinterpret_cast(taskData->inputs[0]); - data = std::vector(input_, input_ + taskData->inputs_count[0]); - // Init result value - result = 0; - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() { - internal_order_test(); - // Check count elements of output - return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() { - internal_order_test(); - - // Determine maximum 
value of data vector - result = vectorMax(data); - return true; -} - -bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = result; - return true; -} \ No newline at end of file From 9ccaccb57783b376c8be1ae0a133fc062c83fd8e Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 4 Nov 2024 09:01:56 +0800 Subject: [PATCH 080/155] [CI] Run performance testing every 4 hours (#177) --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9d26069a9fd..0f84f4a29c7 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -4,7 +4,7 @@ on: push: pull_request: schedule: - - cron: '0 12 * * *' + - cron: '0 */4 * * *' workflow_dispatch: jobs: From 496fb62d877b0826492958a60a5fdd229416f490 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 4 Nov 2024 09:25:49 +0800 Subject: [PATCH 081/155] =?UTF-8?q?Revert=20"=D0=93=D0=BE=D1=80=D0=B4?= =?UTF-8?q?=D0=B5=D0=B5=D0=B2=D0=B0=20=D0=A2=D0=B0=D0=B8=D1=81=D0=B8=D1=8F?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2016.=20=D0=9D=D0=B0=D1=85=D0=BE?= =?UTF-8?q?=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81?= =?UTF-8?q?=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD?= =?UTF-8?q?=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81?= =?UTF-8?q?=D1=82=D0=BE=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82?= =?UTF-8?q?=D1=80=D0=B8=D1=86=D1=8B"=20(#180)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#83 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11656291306/job/32452199112 image image --- .../func_tests/main.cpp | 210 ------------------ .../include/ops_mpi.hpp | 47 ---- .../perf_tests/main.cpp | 81 ------- .../src/ops_mpi.cpp | 186 ---------------- .../func_tests/main.cpp | 149 ------------- .../include/ops_seq.hpp | 25 --- .../perf_tests/main.cpp | 79 ------- .../src/ops_seq.cpp | 83 ------- 8 files changed, 860 deletions(-) delete mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp delete mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp deleted file mode 100644 index e170a88d7f8..00000000000 --- a/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp +++ /dev/null @@ -1,210 +0,0 @@ -#include - -#include -#include -#include - -#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" - -TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyInput) { - boost::mpi::communicator world; - - std::shared_ptr taskDataPar = std::make_shared(); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - 
if (world.rank() == 0) { - ASSERT_FALSE(testMpiTaskParallel.validation()); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyOutput) { - boost::mpi::communicator world; - - std::shared_ptr taskDataPar = std::make_shared(); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - if (world.rank() == 0) { - taskDataPar->inputs_count.push_back(5); - taskDataPar->inputs_count.push_back(5); - taskDataPar->inputs.push_back(reinterpret_cast(new int[25])); - ASSERT_FALSE(testMpiTaskParallel.validation()); - - delete[] reinterpret_cast(taskDataPar->inputs[0]); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_columns_with_random) { - boost::mpi::communicator world; - - const int rows = 500; - const int cols = 500; - std::vector> global_matr; - std::vector global_max(cols, INT_MIN); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); - for (unsigned int i = 0; i < global_matr.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataPar->inputs_count = {rows, cols}; - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector max_example(cols, INT_MIN); - - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataSeq->inputs_count = {rows, cols}; - taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); - taskDataSeq->outputs_count.emplace_back(max_example.size()); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(global_max, max_example); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_1000_columns_with_random) { - boost::mpi::communicator world; - - const int rows = 500; - const int cols = 1000; - std::vector> global_matr; - std::vector global_max(cols, INT_MIN); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); - for (unsigned int i = 0; i < global_matr.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataPar->inputs_count = {rows, cols}; - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector max_example(cols, INT_MIN); - - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned 
int i = 0; i < global_matr.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataSeq->inputs_count = {rows, cols}; - taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); - taskDataSeq->outputs_count.emplace_back(max_example.size()); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - for (int i = 0; i < cols; i++) { - ASSERT_EQ(global_max[i], max_example[i]); - } - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_1000_3000_columns_with_random) { - boost::mpi::communicator world; - - const int rows = 1000; - const int cols = 3000; - std::vector> global_matr; - std::vector global_max(cols, INT_MIN); - - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); - for (unsigned int i = 0; i < global_matr.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataPar->inputs_count = {rows, cols}; - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector max_example(cols, INT_MIN); - - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matr.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); - } - taskDataSeq->inputs_count = {rows, cols}; - taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); - taskDataSeq->outputs_count.emplace_back(max_example.size()); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - for (int i = 0; i < cols; i++) { - ASSERT_EQ(global_max[i], max_example[i]); - } - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_size_of_input) { - boost::mpi::communicator world; - - std::shared_ptr taskDataPar = std::make_shared(); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - if (world.rank() == 0) { - taskDataPar->inputs_count.push_back(2); - taskDataPar->inputs_count.push_back(3); - taskDataPar->inputs.push_back(reinterpret_cast(new int[6])); - taskDataPar->outputs_count.push_back(2); - ASSERT_FALSE(testMpiTaskParallel.validation()); - - delete[] reinterpret_cast(taskDataPar->inputs[0]); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_of_output) { - boost::mpi::communicator world; - - std::shared_ptr taskDataPar = std::make_shared(); - gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - - if (world.rank() == 0) { - taskDataPar->inputs_count.push_back(10); - taskDataPar->inputs_count.push_back(15); - taskDataPar->inputs.push_back(reinterpret_cast(new int[150])); - 
taskDataPar->outputs_count.push_back(2); - ASSERT_FALSE(testMpiTaskParallel.validation()); - - delete[] reinterpret_cast(taskDataPar->inputs[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp deleted file mode 100644 index 43906c64bf9..00000000000 --- a/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace gordeva_t_max_val_of_column_matrix_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - static std::vector gen_rand_vec(int s, int low = 0, int upp = 50); - static std::vector> gen_rand_matr(int rows, int cols); - - private: - std::vector> input_; - std::vector res; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_, local_input_; - std::vector res; - boost::mpi::communicator world; -}; - -} // namespace gordeva_t_max_val_of_column_matrix_mpi diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp deleted file mode 100644 index b3aa5af7edb..00000000000 --- a/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,81 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" - -TEST(gordeva_t_max_val_of_column_matrix_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector> global_matr; - std::vector max_s; - - std::shared_ptr taskDataPar = std::make_shared(); - int rows = 5000; - int cols = 5000; - - if (world.rank() == 0) { - global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); - max_s.resize(cols, INT_MIN); - for (auto& i : global_matr) { - taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); - } - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); - taskDataPar->outputs_count.emplace_back(max_s.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - if (world.rank() == 0) { - for (size_t j = 0; j < max_s.size(); ++j) { - ASSERT_EQ(max_s[j], 200); - } - } -} - -TEST(gordeva_t_max_val_of_column_matrix_mpi, test_task_run) { - boost::mpi::communicator world; - - std::vector> global_matr; - std::vector max_s; - std::shared_ptr taskDataPar = std::make_shared(); - int rows = 7000; - int cols = 7000; - - if (world.rank() == 0) { - global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(rows, cols); - max_s.resize(cols, INT_MIN); - - for (auto& i : 
global_matr) { - taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); - } - - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); - taskDataPar->outputs_count.emplace_back(max_s.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - if (world.rank() == 0) { - for (size_t j = 0; j < max_s.size(); ++j) { - ASSERT_EQ(max_s[j], 200); - } - } -} diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp deleted file mode 100644 index 34be2cf970e..00000000000 --- a/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,186 +0,0 @@ -#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - - int rows = taskData->inputs_count[0]; - int cols = taskData->inputs_count[1]; - - input_.resize(rows, std::vector(cols)); - - for (int i = 0; i < rows; i++) { - int* input_matr = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; - } - - res.resize(cols); - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - - if (taskData->inputs.empty() || taskData->outputs.empty()) return false; - if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; - if (taskData->outputs_count.size() != 1) return false; - if (taskData->inputs_count.size() < 2) return false; - if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - // int cols = input_[0].size(); - // int rows = input_.size(); - for (size_t i = 0; i < input_[0].size(); i++) { - int max_el = input_[0][i]; - for (size_t j = 1; j < input_.size(); j++) - if (input_[j][i] > max_el) max_el = input_[j][i]; - - res[i] = max_el; - } - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - - int* output_matr = reinterpret_cast(taskData->outputs[0]); - - std::copy(res.begin(), res.end(), output_matr); - return true; -} - -std::vector gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_vec(int s, int low, int upp) { - std::vector v(s); - for (auto& i : v) i = low + (std::rand() % (upp - low + 1)); - return v; -} - -std::vector> gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::gen_rand_matr(int rows, - int cols) { - std::vector> matr(rows, std::vector(cols)); - - for (int i = 0; i < rows; ++i) { - matr[i] = gen_rand_vec(cols, -500, 500); - } - for (int j = 0; j < cols; ++j) { - int row_rand = std::rand() % rows; - matr[row_rand][j] = 10; - } - return matr; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - - if (world.rank() == 0) { 
- if (taskData->inputs.empty() || taskData->outputs.empty()) return false; - if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; - if (taskData->outputs_count.size() != 1) return false; - if (taskData->inputs_count.size() < 2) return false; - if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; - } - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - int rows = 0; - int cols = 0; - - int delta = 0; - int delta_1 = 0; - - if (world.rank() == 0) { - rows = taskData->inputs_count[0]; - cols = taskData->inputs_count[1]; - } - - broadcast(world, rows, 0); - broadcast(world, cols, 0); - - delta = rows / world.size(); - delta_1 = rows % world.size(); - - if (world.rank() == 0) { - input_.resize(rows, std::vector(cols)); - for (int i = 0; i < rows; i++) { - int* input_matr = reinterpret_cast(taskData->inputs[i]); - input_[i].assign(input_matr, input_matr + cols); - } - - for (int proc = 1; proc < world.size(); proc++) { - int row_1 = proc * delta + std::min(proc, delta_1); - int kol_vo = delta + (proc < delta_1 ? 1 : 0); - - for (int i = row_1; i < row_1 + kol_vo; i++) world.send(proc, 0, input_[i].data(), cols); - } - } - - int local_input_rows = delta + (world.rank() < delta_1 ? 1 : 0); - local_input_.resize(local_input_rows, std::vector(cols)); - - if (world.rank() == 0) { - std::copy(input_.begin(), input_.begin() + local_input_rows, local_input_.begin()); - } else { - for (int i = 0; i < local_input_rows; i++) world.recv(0, 0, local_input_[i].data(), cols); - } - - res.resize(cols); - - std::vector tmp_max(local_input_[0].size(), INT_MIN); - - for (size_t i = 0; i < local_input_[0].size(); i++) { - for (size_t j = 0; j < local_input_.size(); j++) { - tmp_max[i] = std::max(tmp_max[i], local_input_[j][i]); - } - } - - if (world.rank() == 0) { - std::vector max_s(res.size(), INT_MIN); - std::copy(tmp_max.begin(), tmp_max.end(), max_s.begin()); - - for (int proc = 1; proc < world.size(); proc++) { - std::vector proc_max(res.size()); - world.recv(proc, 0, proc_max.data(), res.size()); - - for (size_t i = 0; i < res.size(); i++) { - max_s[i] = std::max(max_s[i], proc_max[i]); - } - } - std::copy(max_s.begin(), max_s.end(), res.begin()); - } else { - world.send(0, 0, tmp_max.data(), tmp_max.size()); - } - return true; -} - -bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - - if (world.rank() == 0) { - std::copy(res.begin(), res.end(), reinterpret_cast(taskData->outputs[0])); - } - return true; -} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp deleted file mode 100644 index 79ab69263fe..00000000000 --- a/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include - -#include -#include - -#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" - -TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyInput) { - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_FALSE(testTaskSequential.validation()); -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyOutput) { - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - taskDataSeq->inputs_count.push_back(5); - 
taskDataSeq->inputs_count.push_back(5); - taskDataSeq->inputs.push_back(reinterpret_cast(new int[25])); - - ASSERT_FALSE(testTaskSequential.validation()); - - delete[] reinterpret_cast(taskDataSeq->inputs[0]); -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_5000_columns_with_random) { - const int rows = 5000; - const int cols = 5000; - - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix = - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); - for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector res(cols, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); - taskDataSeq->outputs_count.emplace_back(res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int j = 0; j < cols; j++) { - int max_el = matrix[0][j]; - for (int i = 1; i < rows; i++) { - if (matrix[i][j] > max_el) { - max_el = matrix[i][j]; - } - } - ASSERT_EQ(res[j], max_el); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_500_1000_columns_with_random) { - const int rows = 500; - const int cols = 1000; - - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix = - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); - for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector res(cols, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); - taskDataSeq->outputs_count.emplace_back(res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int j = 0; j < cols; j++) { - int max_el = matrix[0][j]; - for (int i = 1; i < rows; i++) { - if (matrix[i][j] > max_el) { - max_el = matrix[i][j]; - } - } - ASSERT_EQ(res[j], max_el); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_1000_3000_columns_with_random) { - const int rows = 1000; - const int cols = 3000; - - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - std::vector> matrix = - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); - for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector res(cols, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); - taskDataSeq->outputs_count.emplace_back(res.size()); - - ASSERT_EQ(testTaskSequential.validation(), true); - ASSERT_TRUE(testTaskSequential.pre_processing()); - ASSERT_TRUE(testTaskSequential.run()); - ASSERT_TRUE(testTaskSequential.post_processing()); - - for (int j = 0; j < cols; j++) { - int max_el = matrix[0][j]; - for (int i = 1; i < rows; i++) { - if (matrix[i][j] > max_el) { 
- max_el = matrix[i][j]; - } - } - ASSERT_EQ(res[j], max_el); - } -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_size_of_input) { - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - taskDataSeq->inputs_count.push_back(10); - taskDataSeq->inputs_count.push_back(0); - taskDataSeq->inputs.push_back(reinterpret_cast(new int[10])); - taskDataSeq->outputs_count.push_back(1); - - ASSERT_FALSE(testTaskSequential.validation()); - - delete[] reinterpret_cast(taskDataSeq->inputs[0]); -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_of_output) { - std::shared_ptr taskDataSeq = std::make_shared(); - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - - taskDataSeq->inputs_count.push_back(10); - taskDataSeq->inputs_count.push_back(15); - taskDataSeq->inputs.push_back(reinterpret_cast(new int[150])); - taskDataSeq->outputs_count.push_back(10); - - ASSERT_FALSE(testTaskSequential.validation()); - - delete[] reinterpret_cast(taskDataSeq->inputs[0]); -} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp deleted file mode 100644 index d457539d242..00000000000 --- a/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - -#include "core/task/include/task.hpp" - -namespace gordeva_t_max_val_of_column_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - static std::vector gen_rand_vec(int size, int lower_bound = 0, int upper_bound = 30); - static std::vector> gen_rand_matr(int rows, int cols); - - private: - std::vector> input_; - std::vector res_; -}; - -} // namespace gordeva_t_max_val_of_column_matrix_seq diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp deleted file mode 100644 index a2052561a87..00000000000 --- a/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,79 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" - -TEST(gordeva_t_max_val_of_column_matrix_seq, test_pipeline_run) { - const int cols = 5000; - const int rows = 5000; - - std::shared_ptr taskDataSeq = std::make_shared(); - - auto testTaskSequential = std::make_shared(taskDataSeq); - - std::vector> matrix = - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); - - for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - std::vector res_vec(cols, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); - taskDataSeq->outputs_count.emplace_back(res_vec.size()); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return 
static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); -} - -TEST(gordeva_t_max_val_of_column_matrix_seq, test_task_run) { - const int cols = 7000; - const int rows = 7000; - - std::shared_ptr taskDataSeq = std::make_shared(); - - auto testTaskSequential = std::make_shared(taskDataSeq); - - std::vector> matr_rand = - gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(rows, cols); - for (auto &row : matr_rand) taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); - - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - - std::vector res_vec(cols, 0); - taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); - taskDataSeq->outputs_count.emplace_back(res_vec.size()); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); -} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp deleted file mode 100644 index 7e64ae19915..00000000000 --- a/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,83 +0,0 @@ -#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" - -#include - -using namespace std::chrono_literals; - -namespace gordeva_t_max_val_of_column_matrix_seq { - -bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - - int rows = taskData->inputs_count[0]; - int cols = taskData->inputs_count[1]; - int* input_matr; - input_.resize(rows, std::vector(cols)); - - for (int i = 0; i < rows; i++) { - input_matr = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; - } - - res_.resize(cols); - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::validation() { - internal_order_test(); - - if (taskData->inputs.empty() || taskData->outputs.empty()) return false; - if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; - if (taskData->outputs_count.size() != 1) return false; - if (taskData->inputs_count.size() < 2) return false; - if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::run() { - internal_order_test(); - - for (size_t i = 0; i < input_[0].size(); i++) { - int max_el = input_[0][i]; - for (size_t j = 1; j < input_.size(); j++) - if (input_[j][i] > max_el) max_el = input_[j][i]; - - res_[i] = max_el; - } - - return true; -} - -bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::post_processing() { - internal_order_test(); - - int* output_matr = reinterpret_cast(taskData->outputs[0]); 
-
-  std::copy(res_.begin(), res_.end(), output_matr);
-  return true;
-}
-
-std::vector<int> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_vec(int size, int lower_bound,
-                                                                                          int upper_bound) {
-  std::vector<int> v(size);
-  for (auto& number : v) number = lower_bound + (std::rand() % (upper_bound - lower_bound + 1));
-  return v;
-}
-
-std::vector<std::vector<int>> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::gen_rand_matr(int rows,
-                                                                                                        int cols) {
-  std::vector<std::vector<int>> matr(rows, std::vector<int>(cols));
-
-  for (int i = 0; i < rows; ++i) {
-    matr[i] = gen_rand_vec(cols, -500, 500);
-  }
-  for (int j = 0; j < cols; ++j) {
-    int row_rand = std::rand() % rows;
-    matr[row_rand][j] = 10;
-  }
-  return matr;
-}
-
-} // namespace gordeva_t_max_val_of_column_matrix_seq

From 9e543f4f65dc4461d3d75a28a65fe50e03c55469 Mon Sep 17 00:00:00 2001
From: Kirius257 <113035841+Kirius257@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:26:23 +0300
Subject: [PATCH 082/155] Kholin Kirill. Task 1. Variant 8. Finding the
 adjacent vector elements that differ most in value. (#103)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Algorithm descriptions

**Sequential program:**
1. Declare local variables that hold the largest difference between adjacent vector elements and the current index of the element from which that difference is computed.
2. Obtain three iterators: the current one (initially the begin iterator), the one just past it, and the past-the-end iterator.
3. Walk the vector in a while loop until the end is reached, taking the absolute difference between the two adjacent elements pointed to by the next and the current iterator.
4. The loop yields the index of the left neighbor and the largest absolute difference between two adjacent vector elements (a minimal sketch of this scan follows below).

**Parallel program:**
1. Hand the vector to the rank-0 process and compute each process's share of the elements.
2. Broadcast the computed chunk size to all processes with MPI_Bcast.
3. Use the collective MPI_Scatter operation to distribute equal-sized but element-wise distinct parts of the vector into each process's local buffer.
4. Each process computes, following the scan described above, the maximum difference (call it the maximum delta) within its subvector and packs the result into its buffer.
5. Call the collective MPI_Reduce operation with MPI_MAX as its reduction operation; it finds the maximum among all the maximum deltas sent by the processes.
6. The variable receiving the MPI_Reduce result then holds the answer to the task (a hedged sketch of steps 1-6 follows below).
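For illustration, a minimal standalone sketch of the sequential scan, written in index form rather than the iterator form described above (hypothetical helper; `max_neighbor_delta` and `data` are illustrative names, not code from this patch):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Scan adjacent pairs once; keep the largest |data[i+1] - data[i]| and the
// index of that pair's left element.
void max_neighbor_delta(const std::vector<double>& data, std::size_t& left_index, double& max_delta) {
  left_index = 0;
  max_delta = 0.0;
  for (std::size_t i = 0; i + 1 < data.size(); ++i) {
    double delta = std::fabs(data[i + 1] - data[i]);
    if (delta > max_delta) {
      max_delta = delta;
      left_index = i;
    }
  }
}
```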
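And a hedged sketch of the MPI pipeline from steps 1-6, under simplifying assumptions: plain MPI C API (which this patch does use), `double` elements, and remainder elements plus chunk-boundary pairs re-checked on rank 0. All names are illustrative; this is not the patch's actual implementation.

```cpp
#include <mpi.h>

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  int size = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<double> data;
  int chunk = 0;
  if (rank == 0) {  // step 1: rank 0 owns the full vector
    data.resize(1000);
    for (std::size_t i = 0; i < data.size(); ++i) data[i] = std::rand() % 100;
    chunk = static_cast<int>(data.size()) / size;
  }
  MPI_Bcast(&chunk, 1, MPI_INT, 0, MPI_COMM_WORLD);  // step 2: share the chunk size

  std::vector<double> local(chunk);
  MPI_Scatter(data.data(), chunk, MPI_DOUBLE,  // step 3: equal chunks to everyone
              local.data(), chunk, MPI_DOUBLE, 0, MPI_COMM_WORLD);

  double local_max = 0.0;  // step 4: per-process maximum delta
  for (int i = 0; i + 1 < chunk; ++i) {
    local_max = std::max(local_max, std::fabs(local[i + 1] - local[i]));
  }

  double global_max = 0.0;  // steps 5-6: MPI_MAX over all local results
  MPI_Reduce(&local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  if (rank == 0 && chunk > 0) {
    // Pairs that straddle chunk borders, plus the tail that the integer
    // division leaves unscattered, are seen by no single process; re-check.
    for (int b = chunk; b < static_cast<int>(data.size()); b += chunk) {
      global_max = std::max(global_max, std::fabs(data[b] - data[b - 1]));
    }
    for (std::size_t i = static_cast<std::size_t>(chunk) * size; i + 1 < data.size(); ++i) {
      global_max = std::max(global_max, std::fabs(data[i + 1] - data[i]));
    }
    std::cout << "max neighbor delta: " << global_max << std::endl;
  }
  MPI_Finalize();
  return 0;
}
```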
--- .../func_tests/main.cpp | 528 ++++++++++++++++++ .../include/ops_mpi.hpp | 330 +++++++++++ .../perf_tests/main.cpp | 102 ++++ .../src/ops_mpi.cpp | 1 + .../func_tests/main.cpp | 291 ++++++++++ .../include/ops_seq.hpp | 121 ++++ .../perf_tests/main.cpp | 80 +++ .../src/ops_seq.cpp | 1 + 8 files changed, 1454 insertions(+) create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp create mode 100644 tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp create mode 100644 tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp new file mode 100644 index 00000000000..166b68246eb --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp @@ -0,0 +1,528 @@ + +#include + +#include +#include +#include + +#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_validation) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 500; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + + global_vec[99] = 5000; + global_vec[100] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testMPITaskSequential(taskDataSeq, op); + ASSERT_EQ(testMPITaskSequential.validation(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_pre_processing) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 500; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + std::shared_ptr 
taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + + global_vec[99] = 5000; + global_vec[100] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + ASSERT_EQ(testMpiTaskParallel.pre_processing(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + ASSERT_EQ(testTaskSequential.pre_processing(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 160; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + global_vec[99] = 5000; + global_vec[100] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + ASSERT_EQ(testMpiTaskParallel.run(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + 
testTaskSequential.pre_processing(); + ASSERT_EQ(testTaskSequential.run(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_post_processing) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + const int count_size_vector = 500; + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::vector global_vec; + std::vector global_delta(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + global_vec[99] = 5000; + global_vec[100] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + ASSERT_EQ(testMpiTaskParallel.post_processing(), true); + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + ASSERT_EQ(testTaskSequential.post_processing(), true); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 200; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[99] = 5000; + global_vec[100] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int test2 = reference_elems[0] - reference_elems[1]; + ASSERT_EQ(test, test2); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int32_t) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const int count_size_vector = 300; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 2 * i + 4; + } + global_vec[99] = 5000; + global_vec[100] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int32_t test2 = reference_elems[0] - reference_elems[1]; + ASSERT_EQ(test, test2); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_int_with_random) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const int count_size_vector = 299; + global_vec = 
kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); + global_vec[99] = 5000; + global_vec[100] = 1; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int test2 = abs(reference_elems[0] - reference_elems[1]); + ASSERT_EQ(test, test2); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 1000; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 0.25 * i + 10; + } + + global_vec[99] = 110.001f; + global_vec[100] = -990.0025f; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + float test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + float test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_float_with_random) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 1000; + global_vec = kholin_k_vector_neighbor_diff_elems_mpi::get_random_vector(count_size_vector); + global_vec[99] = 110.001f; + global_vec[100] = -990.0025f; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + float test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, check_double) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_delta(1, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (ProcRank == 0) { + const int count_size_vector = 200; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 0.25 * i + 10; + } + + global_vec[99] = 110.001; + global_vec[100] = -990.0025; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_delta.data())); + taskDataPar->outputs_count.emplace_back(global_delta.size()); + } + + 
kholin_k_vector_neighbor_diff_elems_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar, op); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + double test = global_delta[0]; + + if (ProcRank == 0) { + std::vector reference_delta(1, 0); + std::vector reference_elems(2, 0); + std::vector reference_indices(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_elems.data())); + taskDataSeq->outputs_count.emplace_back(reference_elems.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_indices.data())); + taskDataSeq->outputs_count.emplace_back(reference_indices.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_delta.data())); + taskDataSeq->outputs_count.emplace_back(reference_delta.size()); + + kholin_k_vector_neighbor_diff_elems_mpi::TestTaskSequential testTaskSequential(taskDataSeq, op); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + double test2 = reference_elems[0] - reference_elems[1]; + ASSERT_NEAR(test, test2, 1e-5); + } +} \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp new file mode 100644 index 00000000000..19cbbb2ffc5 --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp @@ -0,0 +1,330 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace enum_ops { +enum operations { MAX_DIFFERENCE }; +}; + +namespace kholin_k_vector_neighbor_diff_elems_mpi { + +template +std::vector get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + + if (std::is_integral::value) { + std::uniform_int_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else if (std::is_floating_point::value) { + std::uniform_real_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else { + throw std::invalid_argument("TypeElem must be an integral or floating point type"); + } + + return vec; +} + +template +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_, enum_ops::operations ops_) + : Task(std::move(taskData_)), ops(ops_) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + double result; + TypeIndex left_index; + TypeIndex right_index; + TypeElem left_elem; + TypeElem right_elem; + enum_ops::operations ops; +}; + +template +bool TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + result = {}; + left_index = {}; + right_index = 2; + left_elem = {}; + right_elem = {}; + return true; +} + +template +bool TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] 
== 2 && taskData->outputs_count[1] == 2; +} + +template +bool TestTaskSequential::run() { + internal_order_test(); + if (ops == enum_ops::MAX_DIFFERENCE) { + double max_delta = 0; + double delta = 0; + size_t curr_index = 0; + auto iter_curr = input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = input_.end() - 1; + auto iter_begin = input_.begin(); + while (iter_curr != iter_end) { + delta = abs(*iter_next - *iter_curr); + if (delta > max_delta) { + if (iter_begin == iter_curr) { + curr_index = 0; + max_delta = delta; + } else { + curr_index = std::distance(input_.begin(), iter_curr); + max_delta = delta; + } + } + iter_curr++; + iter_next = iter_curr + 1; + } + result = max_delta; + right_index = curr_index + 1; + left_index = curr_index; + left_elem = input_[left_index]; + right_elem = input_[right_index]; + } + return true; +} + +template +bool TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = left_elem; + reinterpret_cast(taskData->outputs[0])[1] = right_elem; + reinterpret_cast(taskData->outputs[1])[0] = left_index; + reinterpret_cast(taskData->outputs[1])[1] = right_index; + reinterpret_cast(taskData->outputs[2])[0] = result; + return true; +} + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_, enum_ops::operations ops_) + : Task(std::move(taskData_)), ops(ops_) {} + + MPI_Datatype get_mpi_type(); + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + ~TestMPITaskParallel() override { MPI_Type_free(&mpi_type_elem); } + + private: + std::vector input_; + std::vector local_input_; + int delta_n; + int delta_n_r; + double result; + int residue; + enum_ops::operations ops; + MPI_Datatype mpi_type_elem; + void print_local_data(); + double max_difference(); + double IsJoints_max(); +}; + +template +MPI_Datatype TestMPITaskParallel::get_mpi_type() { + MPI_Type_contiguous(sizeof(TypeElem), MPI_BYTE, &mpi_type_elem); + MPI_Type_commit(&mpi_type_elem); + return mpi_type_elem; +} + +template +bool TestMPITaskParallel::pre_processing() { + internal_order_test(); + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + } + result = {}; + return true; +} + +template +bool TestMPITaskParallel::validation() { + internal_order_test(); + mpi_type_elem = get_mpi_type(); + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +template +bool TestMPITaskParallel::run() { + internal_order_test(); + int ProcRank = 0; + int size = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + if (ProcRank == 0) { + delta_n = taskData->inputs_count[0] / size; + delta_n_r = {}; + } + MPI_Bcast(&delta_n, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); + if (ProcRank == 0) { + residue = taskData->inputs_count[0] - (delta_n * size); + delta_n_r = delta_n + residue; + local_input_ = std::vector(delta_n_r); + } else { + local_input_ = std::vector(delta_n); + } + MPI_Scatter(input_.data(), delta_n, mpi_type_elem, local_input_.data(), delta_n, mpi_type_elem, 0, MPI_COMM_WORLD); + if (ProcRank == 0) { + for (int i = delta_n; i < delta_n_r; i++) { + local_input_[i] = input_[i]; + } + } 
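+  // At this point every process holds its own chunk of the vector, and
+  // rank 0 has additionally copied in the remainder elements that the
+  // integer division (inputs_count[0] / size) left unassigned.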
+ double local_result = 0; + local_result = max_difference(); + if (ops == enum_ops::MAX_DIFFERENCE) { + double sendbuf1[1]; + sendbuf1[0] = local_result; + MPI_Reduce(sendbuf1, &result, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); + } + if (ProcRank == 0) { + double joint_result = IsJoints_max(); + if (joint_result > result) { + result = joint_result; + } + } + return true; +} + +template +bool TestMPITaskParallel::post_processing() { + internal_order_test(); + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} + +template +void TestMPITaskParallel::print_local_data() { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + if (ProcRank == 0) { + std::cout << "I'm proc 0" << "and my local_input data is "; + for (unsigned int i = 0; i < delta_n_r; i++) { + std::cout << local_input_[i] << " "; + } + std::cout << std::endl; + } else { + std::cout << "I'm" << ProcRank << " proc " << "and my local_input data is "; + for (unsigned int i = 0; i < delta_n; i++) { + std::cout << local_input_[i] << " "; + } + std::cout << std::endl; + } +} + +template +double TestMPITaskParallel::max_difference() { + double max_delta = 0; + double delta = 0; + double local_result = 0; + auto iter_curr = local_input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = local_input_.end(); + while (iter_curr != iter_end - 1) { + delta = abs(*iter_next - *iter_curr); + if (delta > max_delta) { + max_delta = delta; + } + iter_curr++; + iter_next = iter_curr + 1; + } + local_result = max_delta; + return local_result; +} + +template +double TestMPITaskParallel::IsJoints_max() { + double joint_delta = 0; + auto iter_curr = input_.begin(); + auto iter_prev = iter_curr + 1; + auto iter_end = input_.end(); + double max_joint_delta = 0; + int res_i = 0; + while (iter_curr != iter_end) { + if (residue == 0) { + iter_curr = iter_curr + delta_n; + iter_prev = iter_curr - 1; + if (iter_curr == iter_end) { + break; + } + joint_delta = abs(*iter_curr - *iter_prev); + if (joint_delta > max_joint_delta) { + max_joint_delta = joint_delta; + } + } else { + if (res_i == 0) { + iter_curr = iter_curr + delta_n_r; + iter_prev = iter_curr - 1; + joint_delta = abs(*iter_curr - *iter_prev); + if (joint_delta > max_joint_delta) { + max_joint_delta = joint_delta; + } + res_i++; + } else { + iter_curr = iter_curr + delta_n; + iter_prev = iter_curr - 1; + if (iter_curr == iter_end) { + break; + } + joint_delta = abs(*iter_curr - *iter_prev); + if (joint_delta > max_joint_delta) { + max_joint_delta = joint_delta; + } + } + } + } + return max_joint_delta; +} +} // namespace kholin_k_vector_neighbor_diff_elems_mpi \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp new file mode 100644 index 00000000000..b43ba0fc1c6 --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp @@ -0,0 +1,102 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_pipeline_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_elems(2, 0); + std::vector global_indices(2, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + std::shared_ptr taskDataPar = std::make_shared(); + if 
(ProcRank == 0) { + const int count_size_vector = 100000001; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[10] = 5000; + global_vec[11] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); + taskDataPar->outputs_count.emplace_back(global_elems.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskDataPar->outputs_count.emplace_back(global_indices.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar, op); + testMpiTaskParallel->validation(); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (ProcRank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} + +TEST(kholin_k_vector_neighbor_diff_elems_mpi, test_task_run) { + int ProcRank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &ProcRank); + std::vector global_vec; + std::vector global_elems(2, 0); + std::vector global_indices(2, 0); + enum_ops::operations op = enum_ops::MAX_DIFFERENCE; + + std::shared_ptr taskDataPar = std::make_shared(); + if (ProcRank == 0) { + const int count_size_vector = 100000001; + global_vec = std::vector(count_size_vector); + for (size_t i = 0; i < global_vec.size(); i++) { + global_vec[i] = 4 * i + 2; + } + + global_vec[10] = 5000; + global_vec[11] = 1; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_elems.data())); + taskDataPar->outputs_count.emplace_back(global_elems.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_indices.data())); + taskDataPar->outputs_count.emplace_back(global_indices.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar, op); + testMpiTaskParallel->validation(); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (ProcRank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} \ No newline at end of file diff --git a/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp new file mode 100644 index 00000000000..94c0fb83537 --- /dev/null +++ b/tasks/mpi/kholin_k_vector_neighbor_diff_elems/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "mpi/kholin_k_vector_neighbor_diff_elems/include/ops_mpi.hpp" \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp new file mode 100644 index 
00000000000..02d2850d6ed --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/func_tests/main.cpp @@ -0,0 +1,291 @@ +#include + +#include + +#include "core/task/include/task.hpp" +#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_pre_processing) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + + EXPECT_EQ(testTaskSequential.pre_processing(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_validation) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + EXPECT_EQ(testTaskSequential.validation(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_run) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + EXPECT_EQ(testTaskSequential.run(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_post_processing) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + EXPECT_EQ(testTaskSequential.post_processing(), true); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int32_t) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = 2 * i; + } + in[234] = 0; + in[235] = 
4000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 0l); + EXPECT_EQ(out[1], 4000l); + EXPECT_EQ(out_index[0], 234ull); + EXPECT_EQ(out_index[1], 235ull); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, check_int_with_random) { + std::vector in(1256, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(1256); + in[234] = 0; + in[235] = 4000; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 0l); + EXPECT_EQ(out[1], 4000l); + EXPECT_EQ(out_index[0], 234ull); + EXPECT_EQ(out_index[1], 235ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_double) { + std::vector in(25680, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + in[189] = -1000.1; + in[190] = 9000.9; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], -1000.1, 1e-6); + EXPECT_NEAR(out[1], 9000.9, 1e-6); + EXPECT_EQ(out_index[0], 189ull); + EXPECT_EQ(out_index[1], 190ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int8_t) { + std::vector in(250, -1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 2 == 0) { + in[i] = -50; + } else { + in[i] = 50; + } + } + in[5] = 56; + in[6] = -56; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + 
kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], 56); + EXPECT_EQ(out[1], -56); + EXPECT_EQ(out_index[0], 5ull); + EXPECT_EQ(out_index[1], 6ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_int64_t) { + std::vector in(75836, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + if (i % 3 == 0) { + in[i] = 10; + } + if (i % 3 == 1) { + in[i] = 30; + } + if (i % 3 == 2) { + in[i] = 70; + } + } + in[20] = -1000; + in[21] = 1119; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_EQ(out[0], -1000ll); + EXPECT_EQ(out[1], 1119ll); + EXPECT_EQ(out_index[0], 20ull); + EXPECT_EQ(out_index[1], 21ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float) { + std::vector in(20, 1.0f); + std::vector out(2, 0.0f); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] += (i + 1.0f) * 2.5f; + } + in[0] = 110.001f; + in[1] = -990.0025f; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], 110.001f, 1e-4); + EXPECT_NEAR(out[1], -990.0025f, 1e-4); + EXPECT_EQ(out_index[0], 0ull); + EXPECT_EQ(out_index[1], 1ull); +} + +TEST(kholin_k_vector_neighbour_diff_elems_seq, check_float_with_random) { + std::vector in(20, 1.0f); + std::vector out(2, 0.0f); + std::vector out_index(2, 0); + in = kholin_k_vector_neighbor_diff_elems_seq::get_random_vector(20); + in[0] = 110.001f; + in[1] = -990.0025f; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + kholin_k_vector_neighbor_diff_elems_seq::TestTaskSequential testTaskSequential(taskData); + testTaskSequential.validation(); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + EXPECT_NEAR(out[0], 110.001f, 1e-4); + EXPECT_NEAR(out[1], -990.0025f, 1e-4); + 
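+  // get_random_vector() draws values from [0, 99], so no random adjacent pair can beat the
+  // fixed |110.001f - (-990.0025f)| ~ 1100.0 planted at indices 0 and 1.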
EXPECT_EQ(out_index[0], 0ull); + EXPECT_EQ(out_index[1], 1ull); +} \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp new file mode 100644 index 00000000000..77e777bab23 --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +using namespace std::chrono_literals; + +namespace kholin_k_vector_neighbor_diff_elems_seq { + +template +std::vector get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + + if (std::is_integral::value) { + std::uniform_int_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else if (std::is_floating_point::value) { + std::uniform_real_distribution dist(0, 99); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + } else { + throw std::invalid_argument("TypeElem must be an integral or floating point type"); + } + + return vec; +} + +template +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + double result; + TypeIndex left_index; + TypeIndex right_index; + TypeElem left_elem; + TypeElem right_elem; +}; + +template +bool TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(ptr, ptr + taskData->inputs_count[0], input_.begin()); + result = {}; + left_index = {}; + right_index = 2; + left_elem = {}; + right_elem = {}; + return true; +} + +template +bool TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 2 && taskData->outputs_count[1] == 2; +} + +template +bool TestTaskSequential::run() { + internal_order_test(); + double max_delta = 0; + double delta = 0; + size_t curr_index = 0; + auto iter_curr = input_.begin(); + auto iter_next = iter_curr + 1; + auto iter_end = input_.end() - 1; + auto iter_begin = input_.begin(); + while (iter_curr != iter_end) { + delta = fabs((double)(*iter_next - *iter_curr)); + if (delta > max_delta) { + if (iter_begin == iter_curr) { + curr_index = 0; + max_delta = delta; + } else { + curr_index = std::distance(input_.begin(), iter_curr); + max_delta = delta; + } + } + iter_curr++; + iter_next = iter_curr + 1; + } + result = max_delta; + right_index = curr_index + 1; + left_index = curr_index; + left_elem = input_[left_index]; + + right_elem = input_[right_index]; + return true; +} + +template +bool TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = left_elem; + reinterpret_cast(taskData->outputs[0])[1] = right_elem; + reinterpret_cast(taskData->outputs[1])[0] = left_index; + reinterpret_cast(taskData->outputs[1])[1] = right_index; + return true; +} +} // namespace kholin_k_vector_neighbor_diff_elems_seq \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp new file mode 100644 index 00000000000..d4e8ad2457f --- /dev/null +++ 
b/tasks/seq/kholin_k_vector_neighbor_diff_elems/perf_tests/main.cpp @@ -0,0 +1,80 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" + +TEST(kholin_k_vector_neighbor_diff_elems_seq, test_pipeline_run) { + const int count = 20000000; + + std::vector in(count, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTaskSequential = + std::make_shared>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(kholin_k_vector_neighbor_diff_elems_seq, test_task_run) { + const int count = 250000000; + + std::vector in(count, 1); + std::vector out(2, 0); + std::vector out_index(2, 0); + for (size_t i = 0; i < in.size(); i++) { + in[i] = i; + } + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + taskData->outputs.emplace_back(reinterpret_cast(out_index.data())); + taskData->outputs_count.emplace_back(out_index.size()); + + auto testTaskSequential = + std::make_shared>(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp b/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp new file mode 100644 index 00000000000..c67ad3aabd7 --- /dev/null +++ b/tasks/seq/kholin_k_vector_neighbor_diff_elems/src/ops_seq.cpp @@ -0,0 +1 @@ +#include "seq/kholin_k_vector_neighbor_diff_elems/include/ops_seq.hpp" \ No newline at end of file From 72d991447b8a3cf56364933d98641ec51d8946a3 Mon Sep 17 00:00:00 2001 From: IljarSmirnov <120449176+IljarSmirnov@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:27:29 +0300 Subject: [PATCH 083/155] =?UTF-8?q?=D0=A1=D0=BC=D0=B8=D1=80=D0=BD=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=98=D0=BB=D1=8C=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0?= 
=?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=2019.=20=D0=98=D0=BD=D1=82=D0=B5=D0=B3=D1=80=D0=B8=D1=80=D0=BE?= =?UTF-8?q?=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B5=D1=82=D0=BE=D0=B4?= =?UTF-8?q?=D0=BE=D0=BC=20=D0=BF=D1=80=D1=8F=D0=BC=D0=BE=D1=83=D0=B3=D0=BE?= =?UTF-8?q?=D0=BB=D1=8C=D0=BD=D0=B8=D0=BA=D0=BE=D0=B2.=20(#104)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

seq:
The integral of a function f is computed over the interval [left, right]. The interval is split into n rectangles and the width of each one is determined. The value of f at the midpoint of each rectangle is multiplied by the rectangle's width, and the product is added to the running sum.

mpi:
The processes receive the interval bounds and the number of rectangles. The overall integration interval is divided by the number of processes, which yields the per-process bounds self_left, self_right.
Within those bounds each process splits its subinterval into parts and computes the integral the same way as in the sequential case.
After the computation the partial results are summed with reduce.
---
 .../func_tests/main.cpp | 323 ++++++++++++++++++
 .../include/ops_mpi.hpp | 53 +++
 .../perf_tests/main.cpp | 90 +++++
 .../src/ops_mpi.cpp | 127 +++++++
 .../func_tests/main.cpp | 164 +++++++++
 .../include/ops_seq.hpp | 26 ++
 .../perf_tests/main.cpp | 67 ++++
 .../src/ops_seq.cpp | 52 +++
 8 files changed, 902 insertions(+)
 create mode 100644 tasks/mpi/smirnov_i_integration_by_rectangles/func_tests/main.cpp
 create mode 100644 tasks/mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/smirnov_i_integration_by_rectangles/perf_tests/main.cpp
 create mode 100644 tasks/mpi/smirnov_i_integration_by_rectangles/src/ops_mpi.cpp
 create mode 100644 tasks/seq/smirnov_i_integration_by_rectangles/func_tests/main.cpp
 create mode 100644 tasks/seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp
 create mode 100644 tasks/seq/smirnov_i_integration_by_rectangles/perf_tests/main.cpp
 create mode 100644 tasks/seq/smirnov_i_integration_by_rectangles/src/ops_seq.cpp

diff --git a/tasks/mpi/smirnov_i_integration_by_rectangles/func_tests/main.cpp b/tasks/mpi/smirnov_i_integration_by_rectangles/func_tests/main.cpp
new file mode 100644
index 00000000000..4635e0bf7f0
--- /dev/null
+++ b/tasks/mpi/smirnov_i_integration_by_rectangles/func_tests/main.cpp
@@ -0,0 +1,323 @@
+#include <gtest/gtest.h>
+
+#include
+#include
+#include
+#include
+
+#include "mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp"
+double f1(double x) { return x * x; }
+double f2(double x) { return std::exp(x); }
+double f3(double x) { return std::sin(x); }
+double f_const(double x) { return 5 + 0 * x; }
+double f_lin(double x) { return x; }
+
+TEST(smirnov_i_integration_by_rectangles_mpi, Test_invalid_fun_mpi) {
+  boost::mpi::communicator world;
+  double left = 0.0;
+  double right = 1.0;
+  int n_ = 1000;
+  std::vector<double> global_res(1, 0.0);
+  std::vector<double> result_seq(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    std::uint32_t o_size = 1;
+    std::uint32_t i_size = 3;
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&left));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&right));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n_));
+    taskDataPar->inputs_count.emplace_back(i_size);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_res.data()));
+    taskDataPar->outputs_count.emplace_back(o_size);
+  }
+
+  smirnov_i_integration_by_rectangles::TestMPITaskParallel 
testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(nullptr); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + ASSERT_ANY_THROW(testMpiTaskParallel.run()); +} +TEST(smirnov_i_integration_by_rectangles_seq, Test_prime_n) { + double left = 0; + double right = 1; + int n = 997; + double expected_result = 5; + std::vector res(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f_const); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(res[0], expected_result, 1e-6); +} +TEST(smirnov_i_integration_by_rectangles_mpi, Test_const) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + std::vector result_seq(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::uint32_t o_size = 1; + std::uint32_t i_size = 3; + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->inputs_count.emplace_back(i_size); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(o_size); + } + + smirnov_i_integration_by_rectangles::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(f_const); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_seq.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f_const); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(result_seq[0], global_res[0], 1e-3); + } +} + +TEST(smirnov_i_integration_by_rectangles_mpi, Test_linear) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + std::vector result_seq(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + 
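+    // inputs[0..2] carry raw pointers to left, right and n_; rank 0 reads them back in
+    // pre_processing() and run() broadcasts the values to the other ranks.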
taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + smirnov_i_integration_by_rectangles::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(f_lin); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_seq.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f_lin); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(result_seq[0], global_res[0], 1e-3); + } +} +TEST(smirnov_i_integration_by_rectangles, Test_x_times_x) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + std::vector result_seq(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::uint32_t o_size = 1; + std::uint32_t i_size = 3; + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->inputs_count.emplace_back(i_size); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(o_size); + } + smirnov_i_integration_by_rectangles::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(f1); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_seq.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f1); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(result_seq[0], global_res[0], 1e-3); + } +} + +TEST(smirnov_i_integration_by_rectangles_mpi, Test_e_x) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 
1000; + std::vector global_res(1, 0.0); + std::vector result_seq(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + smirnov_i_integration_by_rectangles::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(f2); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_seq.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f2); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(result_seq[0], global_res[0], 1e-3); + } +} + +TEST(smirnov_i_integration_by_rectangles_mpi, Test_sin_x) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + std::vector result_seq(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + smirnov_i_integration_by_rectangles::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.set_function(f3); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_seq.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f3); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(result_seq[0], global_res[0], 1e-3); + } +} 
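For reference, every expected value in these tests comes from the composite midpoint rule. The following is a minimal standalone sketch in plain C++, independent of the ppc::core::Task framework; midpoint_rect and the lambdas are illustrative names, not code from this patch:

#include <cmath>
#include <cstdio>

// Composite midpoint rule on [a, b] with n equal rectangles:
// sum f evaluated at each rectangle's midpoint, then scale by the width h.
double midpoint_rect(double (*f)(double), double a, double b, int n) {
  const double h = (b - a) / n;
  double sum = 0.0;
  for (int i = 0; i < n; i++) {
    sum += f(a + (i + 0.5) * h);
  }
  return sum * h;
}

int main() {
  // The same analytic values the sequential tests check against:
  std::printf("x^2   : %.8f (expect %.8f)\n", midpoint_rect([](double x) { return x * x; }, 0.0, 1.0, 1000), 1.0 / 3.0);
  std::printf("exp(x): %.8f (expect %.8f)\n", midpoint_rect([](double x) { return std::exp(x); }, 0.0, 1.0, 1000), std::exp(1.0) - 1.0);
  std::printf("sin(x): %.8f (expect %.8f)\n", midpoint_rect([](double x) { return std::sin(x); }, 0.0, 1.0, 1000), 1.0 - std::cos(1.0));
  return 0;
}

For a smooth integrand the midpoint rule's error is bounded by (b - a) * h^2 * max|f''| / 24, so with n = 1000 on [0, 1] it stays near 1e-7 for these functions, comfortably inside the 1e-6 and 1e-3 tolerances asserted in the tests.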
diff --git a/tasks/mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp b/tasks/mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp new file mode 100644 index 00000000000..8c5675ea018 --- /dev/null +++ b/tasks/mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp @@ -0,0 +1,53 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace smirnov_i_integration_by_rectangles { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(double (*func)(double)); + + private: + double res{}; + double left{}; + double right{}; + int n_{}; + double seq_integrate_rect(double (*func)(double), double left_, double right_, int n); + double (*f)(double) = nullptr; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(double (*func)(double)); + + private: + double glob_res{}; + double left{}; + double right{}; + int n_{}; + boost::mpi::communicator world; + double mpi_integrate_rect(double (*func)(double), double left_, double right_, int n); + double (*f)(double) = nullptr; +}; +} // namespace smirnov_i_integration_by_rectangles diff --git a/tasks/mpi/smirnov_i_integration_by_rectangles/perf_tests/main.cpp b/tasks/mpi/smirnov_i_integration_by_rectangles/perf_tests/main.cpp new file mode 100644 index 00000000000..beaece1804b --- /dev/null +++ b/tasks/mpi/smirnov_i_integration_by_rectangles/perf_tests/main.cpp @@ -0,0 +1,90 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp" +double f1(double x) { return x * x; } +TEST(smirnov_i_integration_by_rectangles_mpi, test_pipeline_run) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->inputs_count.emplace_back(3); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function(f1); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 1. 
/ 3; + ASSERT_NEAR(global_res[0], expected_result, 1e-5); + } +} +TEST(smirnov_i_integration_by_rectangles_mpi, test_task_run) { + boost::mpi::communicator world; + double left = 0.0; + double right = 1.0; + int n_ = 1000; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&left)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&right)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n_)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->inputs_count.emplace_back(3); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function(f1); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 1. / 3; + ASSERT_NEAR(global_res[0], expected_result, 1e-5); + } +} diff --git a/tasks/mpi/smirnov_i_integration_by_rectangles/src/ops_mpi.cpp b/tasks/mpi/smirnov_i_integration_by_rectangles/src/ops_mpi.cpp new file mode 100644 index 00000000000..b4d0756787c --- /dev/null +++ b/tasks/mpi/smirnov_i_integration_by_rectangles/src/ops_mpi.cpp @@ -0,0 +1,127 @@ +#include "mpi/smirnov_i_integration_by_rectangles/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool smirnov_i_integration_by_rectangles::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + left = reinterpret_cast(taskData->inputs[0])[0]; + right = reinterpret_cast(taskData->inputs[1])[0]; + n_ = reinterpret_cast(taskData->inputs[2])[0]; + } + return true; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + + return true; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskParallel::run() { + internal_order_test(); + broadcast(world, left, 0); + broadcast(world, right, 0); + broadcast(world, n_, 0); + double local_result_{}; + local_result_ = mpi_integrate_rect(f, left, right, n_); + reduce(world, local_result_, glob_res, std::plus<>(), 0); + return true; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = glob_res; + } + return true; +} +void smirnov_i_integration_by_rectangles::TestMPITaskParallel::set_function(double (*func)(double)) { f = func; } + +double smirnov_i_integration_by_rectangles::TestMPITaskParallel::mpi_integrate_rect(double (*func)(double), + double left_, double right_, + int n) { + if (func == nullptr) { + throw std::logic_error("func is nullptr"); + } + int rank; + int size; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + int chunks = n / size; + int dop = n % size; + if (dop != 0) { + if (rank < dop) { + 
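+      // Ranks below the remainder n % size take one extra rectangle, i.e. a slightly finer
+      // subdivision of their own segment; every rank still covers exactly
+      // (right_ - left_) / size of the interval.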
chunks++; + } + } + double res_integr = 0; + + if (rank == 0) { + const double self_left = left_; + const double self_right = left_ + (right_ - left_) / size; + const double len_of_rect = (self_right - self_left) / chunks; + for (int i = 0; i < chunks; i++) { + const double left_rect = self_left + i * len_of_rect; + res_integr += f(left_rect + len_of_rect / 2); + } + res_integr *= len_of_rect; + double recv; + for (int i = 1; i < size; i++) { + MPI_Recv(&recv, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUSES_IGNORE); + res_integr += recv; + } + } else { + const double gap_for_proc = (right_ - left_) / size; + double self_res_integr = 0; + const double self_left = left_ + gap_for_proc * rank; + const double self_right = left_ + gap_for_proc * (rank + 1); + const double len_of_rect = (self_right - self_left) / chunks; + for (int i = 0; i < chunks; i++) { + const double left_rect = self_left + i * len_of_rect; + self_res_integr += f(left_rect + len_of_rect / 2); + } + self_res_integr *= len_of_rect; + MPI_Send(&self_res_integr, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD); + } + + return res_integr; +} + +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::pre_processing() { + internal_order_test(); + left = reinterpret_cast(taskData->inputs[0])[0]; + right = reinterpret_cast(taskData->inputs[1])[0]; + n_ = reinterpret_cast(taskData->inputs[2])[0]; + res = 0; + return true; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::run() { + internal_order_test(); + const double len_of_rect = (right - left) / n_; + for (int i = 0; i < n_; i++) { + const double left_rect = left + i * len_of_rect; + res += f(left_rect + len_of_rect / 2); + } + res *= len_of_rect; + return true; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} +void smirnov_i_integration_by_rectangles::TestMPITaskSequential::set_function(double (*func)(double)) { f = func; } diff --git a/tasks/seq/smirnov_i_integration_by_rectangles/func_tests/main.cpp b/tasks/seq/smirnov_i_integration_by_rectangles/func_tests/main.cpp new file mode 100644 index 00000000000..001601de490 --- /dev/null +++ b/tasks/seq/smirnov_i_integration_by_rectangles/func_tests/main.cpp @@ -0,0 +1,164 @@ +#include + +#include +#include + +#include "seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp" +double f1(double x) { return x * x; } +double f2(double x) { return std::exp(x); } +double f3(double x) { return std::sin(x); } +double f_const(double x) { return 5 + 0 * x; } +double f_lin(double x) { return x; } +TEST(smirnov_i_integration_by_rectangles_seq, Test_invalid_func) { + double left = 0; + double right = 1; + int n = 1000; + std::vector res(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + 
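+  // A null integrand still passes validation() and pre_processing(); seq_integrate_rect()
+  // then throws std::logic_error, which the ASSERT_ANY_THROW below expects.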
+  testMpiTaskSequential.set_function(nullptr);
+
+  ASSERT_EQ(testMpiTaskSequential.validation(), true);
+  testMpiTaskSequential.pre_processing();
+  ASSERT_ANY_THROW(testMpiTaskSequential.run());
+}
+TEST(smirnov_i_integration_by_rectangles_seq, Test_const) {
+  double left = 0;
+  double right = 1;
+  int n = 1000;
+  double expected_result = 5;
+  std::vector<double> res(1, 0.0);
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&left));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&right));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res.data()));
+  taskDataSeq->outputs_count.emplace_back(1);
+
+  smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+
+  testMpiTaskSequential.set_function(f_const);
+
+  ASSERT_EQ(testMpiTaskSequential.validation(), true);
+  testMpiTaskSequential.pre_processing();
+  testMpiTaskSequential.run();
+  testMpiTaskSequential.post_processing();
+  ASSERT_NEAR(res[0], expected_result, 1e-6);
+}
+TEST(smirnov_i_integration_by_rectangles_seq, Test_linear) {
+  double left = 0;
+  double right = 1;
+  int n = 1000;
+  double expected_result = 1. / 2;
+  std::vector<double> res(1, 0.0);
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&left));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&right));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&n));
+  taskDataSeq->inputs_count.emplace_back(1);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res.data()));
+  taskDataSeq->outputs_count.emplace_back(1);
+
+  smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+
+  testMpiTaskSequential.set_function(f_lin);
+
+  ASSERT_EQ(testMpiTaskSequential.validation(), true);
+  testMpiTaskSequential.pre_processing();
+  testMpiTaskSequential.run();
+  testMpiTaskSequential.post_processing();
+  ASSERT_NEAR(expected_result, res[0], 1e-6);
+}
+TEST(smirnov_i_integration_by_rectangles_seq, Test_x_times_x) {
+  double left = 0;
+  double right = 1;
+  int n = 1000;
+  double expected_result = 1. 
/ 3; + std::vector res(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f1); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(expected_result, res[0], 1e-6); +} +TEST(smirnov_i_integration_by_rectangles_seq, Test_e_x) { + double left = 0; + double right = 1; + int n = 1000; + double expected_result = std::exp(1.0) - 1; + std::vector res(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f2); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(expected_result, res[0], 1e-6); +} + +TEST(smirnov_i_integration_by_rectangles_seq, Test_sin_x) { + double left = 0; + double right = 1; + int n = 1000; + double expected_result = 1 - std::cos(1); + std::vector res(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + smirnov_i_integration_by_rectangles::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + testMpiTaskSequential.set_function(f3); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(expected_result, res[0], 1e-6); +} diff --git a/tasks/seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp b/tasks/seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp new file mode 100644 index 00000000000..3b31d5db7da --- /dev/null +++ b/tasks/seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp @@ -0,0 +1,26 @@ +#pragma once +#include + +#include + +#include "core/task/include/task.hpp" + +namespace smirnov_i_integration_by_rectangles { +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool 
pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + void set_function(double (*func)(double)); + + private: + double res{}; + double left_{}; + double right_{}; + int n_{}; + double seq_integrate_rect(double (*func)(double), double left, double right, int n); + double (*f)(double) = nullptr; +}; +} // namespace smirnov_i_integration_by_rectangles diff --git a/tasks/seq/smirnov_i_integration_by_rectangles/perf_tests/main.cpp b/tasks/seq/smirnov_i_integration_by_rectangles/perf_tests/main.cpp new file mode 100644 index 00000000000..03abc47f346 --- /dev/null +++ b/tasks/seq/smirnov_i_integration_by_rectangles/perf_tests/main.cpp @@ -0,0 +1,67 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp" +double f1(double x) { return x * x; } +TEST(smirnov_i_integration_by_rectangles_seq, test_pipeline_run) { + double left = 0; + double right = 1; + int n = 1000; + double expected_result = 1. / 3; + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(3); + std::vector res(1, 0.0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + auto testTaskSequential = std::make_shared(taskDataSeq); + testTaskSequential->set_function(f1); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(res[0], expected_result, 1e-5); +} + +TEST(smirnov_i_integration_by_rectangles_seq, test_task_run) { + double left = 0; + double right = 1; + int n = 1000; + double expected_result = 1. 
/ 3; + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&left)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&right)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(3); + std::vector res(1, 0.0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(1); + auto testTaskSequential = std::make_shared(taskDataSeq); + testTaskSequential->set_function(f1); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(res[0], expected_result, 1e-5); +} diff --git a/tasks/seq/smirnov_i_integration_by_rectangles/src/ops_seq.cpp b/tasks/seq/smirnov_i_integration_by_rectangles/src/ops_seq.cpp new file mode 100644 index 00000000000..7ed3fa625c2 --- /dev/null +++ b/tasks/seq/smirnov_i_integration_by_rectangles/src/ops_seq.cpp @@ -0,0 +1,52 @@ +#include "seq/smirnov_i_integration_by_rectangles/include/ops_seq.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::pre_processing() { + internal_order_test(); + left_ = reinterpret_cast(taskData->inputs[0])[0]; + right_ = reinterpret_cast(taskData->inputs[1])[0]; + n_ = reinterpret_cast(taskData->inputs[2])[0]; + res = 0; + return true; +} + +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::run() { + internal_order_test(); + res = seq_integrate_rect(f, left_, right_, n_); + return true; +} + +bool smirnov_i_integration_by_rectangles::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} +void smirnov_i_integration_by_rectangles::TestMPITaskSequential::set_function(double (*func)(double)) { f = func; } + +double smirnov_i_integration_by_rectangles::TestMPITaskSequential::seq_integrate_rect(double (*func)(double), + double left, double right, + int n) { + if (func == nullptr) { + throw std::logic_error("func is nullptr"); + } + double res_integr = 0; + const double self_left = left; + const double self_right = right; + const double len_of_rect = (self_right - self_left) / n; + for (int i = 0; i < n; i++) { + const double left_rect = self_left + i * len_of_rect; + res_integr += f(left_rect + len_of_rect / 2); + } + res_integr *= len_of_rect; + return res_integr; +} From 8dcfc2eb606320a2bb245186f250a624a3441da5 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 4 Nov 2024 10:28:12 +0800 Subject: [PATCH 084/155] =?UTF-8?q?Revert=20"=D0=9E=D1=82=D1=83=D1=80?= =?UTF-8?q?=D0=B8=D0=BD=20=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4?= =?UTF-8?q?=D1=80.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2015.=20=D0=9D=D0=B0=D1=85?= 
=?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA?= =?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7?= =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20?= =?UTF-8?q?=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82?= =?UTF-8?q?=D1=80=D0=B8=D1=86=D1=8B."=20(#183)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#75 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11656007273/job/32451509871 image --- .../func_tests/main.cpp | 326 ------------------ .../include/ops_mpi.hpp | 58 ---- .../perf_tests/main.cpp | 106 ------ .../src/ops_mpi.cpp | 151 -------- .../func_tests/main.cpp | 122 ------- .../include/ops_seq.hpp | 25 -- .../perf_tests/main.cpp | 84 ----- .../src/ops_seq.cpp | 43 --- 8 files changed, 915 deletions(-) delete mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp delete mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp deleted file mode 100644 index f04d61b8035..00000000000 --- a/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp +++ /dev/null @@ -1,326 +0,0 @@ -#include - -#include -#include -#include -#include - -#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" - -std::vector oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -// squarelike -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_1) { - size_t n = 5; - size_t m = 5; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} - -// rectangular -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_2) { - size_t n = 10; - size_t m = 15; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_3) { - size_t n = 15; - size_t m = 10; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - 
taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_4) { - size_t n = 1; - size_t m = 15; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_5) { - size_t n = 15; - size_t m = 1; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_EMPTY) { - size_t n = 0; - size_t m = 0; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - for (size_t i = 0; i < global_max.size(); i++) { - ASSERT_EQ(reference_max[i], global_max[i]); - } - } -} \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp deleted file mode 100644 index a4c5cbf561d..00000000000 --- a/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,58 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace oturin_a_max_values_by_rows_matrix_mpi { - -std::vector getRandomVector(int sz); - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - size_t n = 0; - size_t m = 0; - std::vector input_; - std::vector res; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit 
TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - /* - m maxes: - ^ - | -9 99 : 99 - | 12 06 : 12 - +------> n - */ - size_t n = 0; - size_t m = 0; - std::vector input_, local_input_; - std::vector res; - - boost::mpi::communicator world; -}; - -} // namespace oturin_a_max_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp deleted file mode 100644 index b5ed07d8a2f..00000000000 --- a/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,106 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" - -std::vector oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_pipeline_run) { - size_t n = 300; - size_t m = 300; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ((int)(n * m), global_max[0]); - } -} - -TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_task_run) { - size_t n = 300; - size_t m = 300; - - boost::mpi::communicator world; - - std::vector global_mat; - std::vector global_max(m, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - taskDataPar->inputs_count.emplace_back(n); - taskDataPar->inputs_count.emplace_back(m); - if (world.rank() == 0) { - global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - 
testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ((int)(n * m), global_max[0]); - } -} diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp deleted file mode 100644 index 2e8f40c19d4..00000000000 --- a/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,151 +0,0 @@ -#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - n = (size_t)(taskData->inputs_count[0]); - m = (size_t)(taskData->inputs_count[1]); - input_ = std::vector(n * m); - int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); - input_ = std::vector(tmp_ptr, tmp_ptr + n * m); - // Init values for output - res = std::vector(m, 0); - return true; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - // Check elements count in i/o - // m & maxes: - return taskData->inputs_count[1] == taskData->outputs_count[0]; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - for (size_t i = 0; i < m; i++) { - res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); - } - return true; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - for (size_t i = 0; i < m; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - return true; -} -//////////////////////////////////////////////////////////////////////////////////////// - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - return taskData->inputs_count[1] == taskData->outputs_count[0]; - } - return true; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - // Init vectors - n = (size_t)(taskData->inputs_count[0]); - m = (size_t)(taskData->inputs_count[1]); - - if (world.rank() == 0) { - input_ = std::vector(n * m); - int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); - input_ = std::vector(tmp_ptr, tmp_ptr + n * m); - // Init values for output - res = std::vector(m, 0); - } - - return true; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - const int TAG_EXIT = 1; - const int TAG_TOBASE = 2; - const int TAG_TOSAT = 3; - -#if defined(_MSC_VER) && !defined(__clang__) - if (world.size() == 1) { - for (size_t i = 0; i < m; i++) { - res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); - } - return true; - } -#endif - - if (world.rank() == 0) { // base - size_t satellites = world.size() - 1; - - int proc_exit = 0; - int proc_wait = 1; - - if (m == 0 || n == 0) { - for (size_t i = 0; i < satellites; i++) { - world.send(i + 1, TAG_EXIT, 
&proc_exit, 1); - } - return true; - } - - int *arr = new int[m * n]; - int *maxes = new int[m]; - - std::copy(input_.begin(), input_.end(), arr); - - size_t row = 0; - while (row < m) { - for (size_t i = 0; i < std::min(satellites, m - row); i++) { - world.send(i + 1, TAG_EXIT, &proc_wait, 1); - world.send(i + 1, TAG_TOSAT, &arr[(row + i) * n], n); - } - - for (size_t i = 0; i < std::min(satellites, m - row); i++) { - world.recv(i + 1, TAG_TOBASE, &maxes[row + i], 1); - } - row += satellites; - } - for (size_t i = 0; i < satellites; i++) // close all satellite processes - world.send(i + 1, TAG_EXIT, &proc_exit, 1); - - res.assign(maxes, maxes + m); - - delete[] arr; - delete[] maxes; - } else { // satelleite - int *arr = new int[n]; - int proc_exit; - while (true) { - int out = INT_MIN; - world.recv(0, TAG_EXIT, &proc_exit, 1); - if (proc_exit == 0) break; - - world.recv(0, TAG_TOSAT, arr, n); - - for (size_t i = 0; i < n; i++) out = std::max(arr[i], out); - - world.send(0, TAG_TOBASE, &out, 1); - } - delete[] arr; - } - return true; -} - -bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - for (size_t i = 0; i < m; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - } - return true; -} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp deleted file mode 100644 index 886af56a69d..00000000000 --- a/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp +++ /dev/null @@ -1,122 +0,0 @@ -#include - -#include -#include - -#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" - -TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_5) { - size_t n = 5; - size_t m = 5; - - // Create data - std::vector in(n * m); - std::vector out(m, 0); - std::vector maxes(m); - - std::iota(std::begin(in), std::end(in), 1); - for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(maxes, out); -} - -TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_10_5) { - size_t n = 10; - size_t m = 5; - - // Create data - std::vector in(n * m); - std::vector out(m, 0); - std::vector maxes(m); - - std::iota(std::begin(in), std::end(in), 1); - for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - 
testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(maxes, out); -} - -TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_10) { - size_t n = 5; - size_t m = 10; - - // Create data - std::vector in(n * m); - std::vector out(m, 0); - std::vector maxes(m); - - std::iota(std::begin(in), std::end(in), 1); - for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(maxes, out); -} - -TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_EMPTY) { - size_t n = 0; - size_t m = 0; - - // Create data - std::vector in(n * m); - std::vector out(m, 0); - std::vector maxes(m); - - std::iota(std::begin(in), std::end(in), 1); - for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(maxes, out); -} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp deleted file mode 100644 index ed042671b44..00000000000 --- a/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace oturin_a_max_values_by_rows_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - size_t n = 0; - size_t m = 0; - std::vector input_; - std::vector res; -}; - -} // namespace oturin_a_max_values_by_rows_matrix_seq diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp deleted file mode 100644 index 84af1763195..00000000000 --- a/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,84 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" - -TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_pipeline_run) { - size_t n = 500; - size_t m = 500; - - // Create data - std::vector in(n * m, 0); - std::vector out(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = 
std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(m, taskDataSeq->outputs_count.back()); -} - -TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_task_run) { - size_t n = 500; - size_t m = 500; - - // Create data - std::vector in(n * m, 0); - std::vector out(m, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(n); - taskDataSeq->inputs_count.emplace_back(m); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(m, taskDataSeq->outputs_count.back()); -} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp deleted file mode 100644 index 588586376cf..00000000000 --- a/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" - -#include -#include -#include - -using namespace std::chrono_literals; - -bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - n = (size_t)(taskData->inputs_count[0]); - m = (size_t)(taskData->inputs_count[1]); - input_ = std::vector(n * m); - int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < n * m; i++) { - input_[i] = tmp_ptr[i]; - } - // Init values for output - res = std::vector(m, 0); - return true; -} - -bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::validation() { - internal_order_test(); - // Check elements count in i/o - // m & maxes: - return taskData->inputs_count[1] == taskData->outputs_count[0]; -} - -bool 
oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::run() { - internal_order_test(); - for (size_t i = 0; i < m; i++) res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); - return true; -} - -bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { - internal_order_test(); - for (size_t i = 0; i < m; i++) { - reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - return true; -}

From 30f4e141331f84d620d8e96ef4b23bdafd4e4228 Mon Sep 17 00:00:00 2001
From: Egor Bessonov <113376898+nCUXe@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:29:25 +0300
Subject: [PATCH 085/155] Bessonov Egor. Task 21. Integration - Monte Carlo method. (#108)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**SEQ:** The _pre_processing()_ method extracts the integration bounds _a_ and _b_ and the number of random points _num_points_ from the input data. Integration: the _run()_ method performs the following steps: - Initializes the random number generator. - Generates num_points random values of x in the range [a, b]. - Evaluates the function at each x and accumulates the results. - Computes the final integration result as the width of the integration range (b - a) multiplied by the mean of the evaluated function values. **MPI** The _run()_ method performs the following steps on each process: - Determines the number of points _num_points_for_process_ that the current process must handle, taking into account the total number of processes and a possible remainder. - Generates num_points_for_process random values of x in the range [a, b]. - Evaluates the function at each x and accumulates the results in the variable _sum_. _Collecting the results:_ - reduce() sums all the partial results. - On the root process the final integration result is computed as (b - a) * (res / num_points).
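For reference, the estimator described above condenses into the following self-contained sketch. This is a hypothetical plain C++ program outside the ppc::core task framework (the helper name monte_carlo_integral is mine, not the patch's); the integrand f(x) = x^3 mirrors the exampl_func used in this patch.

#include <iostream>
#include <random>

// Monte Carlo estimate of the integral of f(x) = x^3 over [a, b]:
// sample x uniformly, average f(x), and scale by the range width.
double monte_carlo_integral(double a, double b, int num_points) {
  std::mt19937 gen(std::random_device{}());
  std::uniform_real_distribution<> dis(a, b);
  double sum = 0.0;
  for (int i = 0; i < num_points; ++i) {
    double x = dis(gen);
    sum += x * x * x;  // evaluate the integrand at the sampled point
  }
  return (b - a) * (sum / num_points);
}

int main() {
  // The exact integral of x^3 over [0, 2] is 4, so the printed estimate
  // should approach 4 as num_points grows.
  std::cout << monte_carlo_integral(0.0, 2.0, 1000000) << "\n";
  // In the MPI variant each of the world.size() processes would handle
  // num_points / size + (rank < num_points % size ? 1 : 0) points and the
  // partial sums would be combined on the root with reduce().
  return 0;
}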
--- .../func_tests/main.cpp | 265 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 ++++ .../perf_tests/main.cpp | 81 ++++++ .../src/ops_mpi.cpp | 95 +++++++ .../func_tests/main.cpp | 87 ++++++ .../include/ops_seq.hpp | 26 ++ .../perf_tests/main.cpp | 60 ++++ .../src/ops_seq.cpp | 36 +++ 8 files changed, 698 insertions(+) create mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp create mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp create mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp create mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp create mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp create mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp create mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp create mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp new file mode 100644 index 00000000000..d5f52f88155 --- /dev/null +++ b/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp @@ -0,0 +1,265 @@ +#include + +#include +#include +#include +#include + +#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp" + +TEST(bessonov_e_integration_monte_carlo_mpi, PositiveRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = 0.0; + double b = 1.0; + int num_points = 1000000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, NegativeRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -1.0; + double b = 0.0; + int num_points = 100000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); +
taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, VerySmallRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = 0.1; + double b = 0.11; + int num_points = 100000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 3e-8); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, LongRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -10.0; + double b = 15.0; + int num_points = 100000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e3); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, VeryLongRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -40.0; + double b = 50.0; + int num_points = 1000000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 3e4); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, EqualRangeTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -2.0; + double b = 2.0; + int num_points = 100000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + 
testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 2e-1); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, RandomTestMPI) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(-8.0, 8.0); + double a = dis(gen); + double b = dis(gen); + + if (a > b) std::swap(a, b); + + if (a == b) b += 1.0; + + int num_points = 100000; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + + bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 2e1); + } +} \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp b/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp new file mode 100644 index 00000000000..ce0f4bd1c6e --- /dev/null +++ b/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace bessonov_e_integration_monte_carlo_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a, b; + int num_points; + static double exampl_func(double x) { return x * x * x; } + + private: + double res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a, b; + int num_points; + static double exampl_func(double x) { return x * x * x; } + + private: + double res; + boost::mpi::communicator world; +}; + +} // namespace bessonov_e_integration_monte_carlo_mpi \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp new file mode 100644 index 00000000000..03b17694113 --- /dev/null +++ b/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp @@ -0,0 +1,81 @@ 
+#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp" + +TEST(bessonov_e_integration_monte_carlo_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + double a = 0.0; + double b = 2.0; + int num_points = 100000000; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(bessonov_e_integration_monte_carlo_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + double a = 0.0; + double b = 2.0; + int num_points = 100000000; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double reference_result = 4.0; + ASSERT_NEAR(reference_result, global_result[0], 1e-1); + } +} \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp new file mode 100644 index 00000000000..b9fd6d74c15 --- /dev/null +++ 
b/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp @@ -0,0 +1,95 @@ +#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp" + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1); +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + a = *reinterpret_cast(taskData->inputs[0]); + b = *reinterpret_cast(taskData->inputs[1]); + num_points = *reinterpret_cast(taskData->inputs[2]); + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::run() { + internal_order_test(); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(a, b); + + double sum = 0.0; + for (int i = 0; i < num_points; ++i) { + double x = dis(gen); + sum += exampl_func(x); + } + res = (b - a) * (sum / num_points); + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if ((taskData->inputs.size() != 3) || (taskData->outputs.size() != 1)) { + return false; + } + num_points = *reinterpret_cast(taskData->inputs[2]); + if (num_points <= 0) { + return false; + } + } + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + a = *reinterpret_cast(taskData->inputs[0]); + b = *reinterpret_cast(taskData->inputs[1]); + num_points = *reinterpret_cast(taskData->inputs[2]); + } + + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + boost::mpi::broadcast(world, a, 0); + boost::mpi::broadcast(world, b, 0); + boost::mpi::broadcast(world, num_points, 0); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(a, b); + + int remainder = num_points % world.size(); + int num_points_for_process = num_points / world.size() + (world.rank() < remainder ? 
1 : 0); + + double sum = 0.0; + for (int i = 0; i < num_points_for_process; ++i) { + double x = dis(gen); + sum += exampl_func(x); + } + + boost::mpi::reduce(world, sum, res, std::plus<>(), 0); + if (world.rank() == 0) { + res = (b - a) * res / num_points; + } + return true; +} + +bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = res; + } + return true; +} diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp new file mode 100644 index 00000000000..6327fd4a033 --- /dev/null +++ b/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include +#include + +#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp" + +TEST(bessonov_e_integration_monte_carlo_seq, PositiveRangeTest) { + double a = 0.0; + double b = 1.0; + int num_points = 1000000; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 0.25; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(bessonov_e_integration_monte_carlo_seq, NegativeRangeTest) { + double a = -1.0; + double b = 0.0; + int num_points = 1000000; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = -0.25; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(bessonov_e_integration_monte_carlo_seq, FullRangeTest) { + double a = -1.0; + double b = 2.0; + int num_points = 1000000; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 3.75; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(bessonov_e_integration_monte_carlo_seq, InputSizeLessThan3) { + auto taskData = std::make_shared(); + double a = 0.0; + double b = 1.0; + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); + ASSERT_FALSE(task.validation()); +} + +TEST(bessonov_e_integration_monte_carlo_seq, OutputSizeLessThan1) { + auto taskData = std::make_shared(); + double a = 0.0; + double b = 1.0; + int num_points = 10000; + 
taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); + ASSERT_FALSE(task.validation()); +} \ No newline at end of file diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp b/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp new file mode 100644 index 00000000000..7165f3ef353 --- /dev/null +++ b/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace bessonov_e_integration_monte_carlo_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a, b; + int num_points; + static double exampl_func(double x) { return x * x * x; } + + private: + double res{}; +}; + +} // namespace bessonov_e_integration_monte_carlo_seq \ No newline at end of file diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp new file mode 100644 index 00000000000..6e843e530ff --- /dev/null +++ b/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp @@ -0,0 +1,60 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp" + +TEST(bessonov_e_integration_monte_carlo_seq, TestPipelineRun) { + double a = 0.0; + double b = 2.0; + int num_points = 10000000; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + double output = 1.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + auto testTaskSequential = std::make_shared(taskData); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 4.0; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(bessonov_e_integration_monte_carlo_seq, TestTaskRun) { + double a = 0.0; + double b = 2.0; + int num_points = 10000000; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&num_points)); + double output = 1.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + auto testTaskSequential = std::make_shared(taskData); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - 
t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 4.0; + ASSERT_NEAR(output, expected_result, 1e-1); +} diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp new file mode 100644 index 00000000000..eb63e632a6e --- /dev/null +++ b/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp @@ -0,0 +1,36 @@ +#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp" + +bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::validation() { + internal_order_test(); + return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1); +} + +bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + a = *reinterpret_cast(taskData->inputs[0]); + b = *reinterpret_cast(taskData->inputs[1]); + num_points = *reinterpret_cast(taskData->inputs[2]); + return true; +} + +bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::run() { + internal_order_test(); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(a, b); + + double sum = 0.0; + for (int i = 0; i < num_points; ++i) { + double x = dis(gen); + sum += exampl_func(x); + } + + res = (b - a) * (sum / num_points); + return true; +} + +bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +}

From d30d40a1e89b3832169b10bde3515db09276ca76 Mon Sep 17 00:00:00 2001
From: maximchhek <119969971+maximchhek@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:30:27 +0300
Subject: [PATCH 086/155] Shlyakov Maxim. Task 1. Variant 17. Finding the minimum values in the rows of a matrix (#109)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Stages of the sequential part: 1. Prepare the structures for the matrix and the result. 2. Fill the matrix. 3. Search each row for its minimum, updating the running minimum value. 4. Store the found values in the output array. Stages of the MPI part: 1. The root process loads the matrix and distributes its rows among all processes. 2. Each process searches for the minimum values in its own set of rows. 3. All processes send their results to the root process. 4. The root process collects the results and stores them in the final array.
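For reference, the per-row minimum search described above reduces to the following self-contained sketch. This is a hypothetical plain C++ program outside the ppc::core task framework (the helper name min_of_each_row is mine, not the patch's); the vector-of-rows matrix layout matches the tests below.

#include <algorithm>
#include <climits>
#include <iostream>
#include <vector>

// Sequential per-row minimum search: one result per matrix row.
std::vector<int> min_of_each_row(const std::vector<std::vector<int>> &matr) {
  std::vector<int> mins(matr.size(), INT_MAX);
  for (size_t row = 0; row < matr.size(); ++row) {
    // In the MPI part each process runs this inner search over its own
    // share of rows and the root process gathers the per-row results.
    for (int value : matr[row]) mins[row] = std::min(mins[row], value);
  }
  return mins;
}

int main() {
  std::vector<std::vector<int>> matr = {{3, -1, 7}, {5, 2, 9}};
  for (int m : min_of_each_row(matr)) std::cout << m << ' ';  // prints: -1 2
  return 0;
}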
--- .../func_tests/main.cpp | 286 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 +++ .../perf_tests/main.cpp | 78 +++++ .../src/ops_mpi.cpp | 184 +++++++++++ .../func_tests/main.cpp | 217 +++++++++++++ .../include/ops_seq.hpp | 27 ++ .../perf_tests/main.cpp | 88 ++++++ .../src/ops_seq.cpp | 79 +++++ 8 files changed, 1006 insertions(+) create mode 100644 tasks/mpi/shlyakov_m_min_value_of_row/func_tests/main.cpp create mode 100644 tasks/mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp create mode 100644 tasks/mpi/shlyakov_m_min_value_of_row/perf_tests/main.cpp create mode 100644 tasks/mpi/shlyakov_m_min_value_of_row/src/ops_mpi.cpp create mode 100644 tasks/seq/shlyakov_m_min_value_of_row/func_tests/main.cpp create mode 100644 tasks/seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp create mode 100644 tasks/seq/shlyakov_m_min_value_of_row/perf_tests/main.cpp create mode 100644 tasks/seq/shlyakov_m_min_value_of_row/src/ops_seq.cpp diff --git a/tasks/mpi/shlyakov_m_min_value_of_row/func_tests/main.cpp b/tasks/mpi/shlyakov_m_min_value_of_row/func_tests/main.cpp new file mode 100644 index 00000000000..1707fa73852 --- /dev/null +++ b/tasks/mpi/shlyakov_m_min_value_of_row/func_tests/main.cpp @@ -0,0 +1,286 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp" + +TEST(shlyakov_m_min_value_of_row_mpi, test_validation) { + boost::mpi::communicator world; + const int sz_row = 100; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_pre_processing) { + boost::mpi::communicator world; + const int sz_row = 100; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + ASSERT_TRUE(testMpiTaskParallel.pre_processing()); +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_run) { + boost::mpi::communicator world; + const int sz_row = 100; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + 
for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + ASSERT_TRUE(testMpiTaskParallel.run()); +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_post_processing) { + boost::mpi::communicator world; + const int sz_row = 100; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + ASSERT_TRUE(testMpiTaskParallel.post_processing()); +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_with_square_matr) { + boost::mpi::communicator world; + const int sz_row = 100; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector seq_min(sz_row, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataSeq->inputs_count = {sz_row, sz_col}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min.data())); + taskDataSeq->outputs_count.emplace_back(seq_min.size()); + + shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < sz_row; i++) { + ASSERT_EQ(main_min[i], INT_MIN); + } + } +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_with_not_square_matr) { + boost::mpi::communicator world; + const int sz_row = 400; + const int sz_col = 100; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar 
= std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector seq_min(sz_row, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataSeq->inputs_count = {sz_row, sz_col}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min.data())); + taskDataSeq->outputs_count.emplace_back(seq_min.size()); + + shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < sz_row; i++) { + ASSERT_EQ(main_min[i], INT_MIN); + } + } +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_with_large_matr) { + boost::mpi::communicator world; + const int sz_row = 5000; + const int sz_col = 5000; + + std::vector> main_matr; + std::vector main_min(sz_row, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataPar->inputs_count = {sz_row, sz_col}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector seq_min(sz_row, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < main_matr.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(main_matr[i].data())); + + taskDataSeq->inputs_count = {sz_row, sz_col}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min.data())); + taskDataSeq->outputs_count.emplace_back(seq_min.size()); + + shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < sz_row; i++) { + ASSERT_EQ(main_min[i], INT_MIN); + } + } +} + +TEST(shlyakov_m_min_value_of_row_mpi, test_with_empty_input) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int sz_row = 100; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + 
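+    // no input buffers are attached on purpose: validation() must reject this task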
shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector v_res(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} \ No newline at end of file diff --git a/tasks/mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp b/tasks/mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp new file mode 100644 index 00000000000..15f5b21e172 --- /dev/null +++ b/tasks/mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace shlyakov_m_min_value_of_row_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector> get_random_matr(int sz_row, int sz_col); + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + boost::mpi::communicator world; +}; + +} // namespace shlyakov_m_min_value_of_row_mpi \ No newline at end of file diff --git a/tasks/mpi/shlyakov_m_min_value_of_row/perf_tests/main.cpp b/tasks/mpi/shlyakov_m_min_value_of_row/perf_tests/main.cpp new file mode 100644 index 00000000000..5d64bb28b71 --- /dev/null +++ b/tasks/mpi/shlyakov_m_min_value_of_row/perf_tests/main.cpp @@ -0,0 +1,78 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp" + +TEST(shlyakov_m_min_value_of_row_mpi, test_pipeline_run_min) { + boost::mpi::communicator world; + std::vector> main_matr; + std::vector main_min; + + std::shared_ptr taskDataPar = std::make_shared(); + int sz_row; + int sz_col; + + if (world.rank() == 0) { + sz_row = 5000; + sz_col = 5000; + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + main_min.resize(sz_row, INT_MAX); + + for (auto& row : main_matr) taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataPar->inputs_count.emplace_back(sz_row); + taskDataPar->inputs_count.emplace_back(sz_col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < main_min.size(); ++i) ASSERT_EQ(main_min[i], INT_MIN); + } +} + 
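+// task_run variant of the same 5000x5000 scenario: every row minimum must equal the INT_MIN planted by get_random_matr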
+TEST(shlyakov_m_min_value_of_row_mpi_perf, test_task_run_min) { + boost::mpi::communicator world; + std::vector> main_matr; + std::vector main_min; + + std::shared_ptr taskDataPar = std::make_shared(); + int sz_row; + int sz_col; + + if (world.rank() == 0) { + sz_row = 5000; + sz_col = 5000; + main_matr = shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(sz_row, sz_col); + main_min.resize(sz_row, INT_MAX); + + for (auto& row : main_matr) taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataPar->inputs_count.emplace_back(sz_row); + taskDataPar->inputs_count.emplace_back(sz_col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(main_min.data())); + taskDataPar->outputs_count.emplace_back(main_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t i = 0; i < main_min.size(); ++i) ASSERT_EQ(main_min[i], INT_MIN); + } +} \ No newline at end of file diff --git a/tasks/mpi/shlyakov_m_min_value_of_row/src/ops_mpi.cpp b/tasks/mpi/shlyakov_m_min_value_of_row/src/ops_mpi.cpp new file mode 100644 index 00000000000..ef582abcb63 --- /dev/null +++ b/tasks/mpi/shlyakov_m_min_value_of_row/src/ops_mpi.cpp @@ -0,0 +1,184 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/shlyakov_m_min_value_of_row/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + size_t sz_row = taskData->inputs_count[0]; + size_t sz_col = taskData->inputs_count[1]; + input_.resize(sz_row, std::vector(sz_col)); + + for (size_t i = 0; i < sz_row; i++) { + auto* matr = reinterpret_cast(taskData->inputs[i]); + for (size_t j = 0; j < sz_col; j++) { + input_[i][j] = matr[j]; + } + } + res_.resize(sz_row); + + return true; +} + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + if (((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0)) && + (taskData->outputs_count[0] == taskData->inputs_count[0])) + return (true); + + return (false); +} + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::run() { + internal_order_test(); + int min; + size_t sz_row = input_.size(); + size_t sz_col = input_[0].size(); + + for (size_t i = 0; i < sz_row; i++) { + min = input_[i][0]; + for (size_t j = 1; j < sz_col; j++) { + if (input_[i][j] < min) { + min = input_[i][j]; + } + } + res_[i] = min; + } + + return true; +} + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + int* result = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + result[i] = res_[i]; + } + + return true; +} + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + return true; +} + +bool shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + if (((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0)) && + (taskData->outputs_count[0] == 
taskData->inputs_count[0]))
+      return true;
+    return false;
+  }
+
+  return true;
+}
+
+bool shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  int sz_row = 0;
+  int sz_col = 0;
+
+  if (world.rank() == 0) {
+    sz_row = taskData->inputs_count[0];
+    sz_col = taskData->inputs_count[1];
+  }
+
+  broadcast(world, sz_row, 0);
+  broadcast(world, sz_col, 0);
+
+  // every rank gets del rows; the first ex ranks take one extra row
+  int del = sz_row / world.size();
+  int ex = sz_row % world.size();
+
+  if (world.rank() == 0) {
+    input_.resize(sz_row, std::vector<int>(sz_col));
+
+    for (int i = 0; i < sz_row; i++) {
+      int* matr = reinterpret_cast<int*>(taskData->inputs[i]);
+      input_[i].assign(matr, matr + sz_col);
+    }
+
+    for (int proc = 1; proc < world.size(); proc++) {
+      int start_row = proc * del + std::min(proc, ex);
+      int num_rows = del + (proc < ex ? 1 : 0);
+      for (int r = start_row; r < start_row + num_rows; r++) world.send(proc, 0, input_[r].data(), sz_col);
+    }
+  }
+
+  int local_rows = del + (world.rank() < ex ? 1 : 0);
+
+  local_input_.resize(local_rows, std::vector<int>(sz_col));
+
+  if (world.rank() == 0)
+    std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin());
+  else {
+    for (int r = 0; r < local_rows; r++) world.recv(0, 0, local_input_[r].data(), sz_col);
+  }
+
+  res_.resize(sz_row);
+
+  std::vector<int> local_mins(local_input_.size(), INT_MAX);
+  for (size_t i = 0; i < local_input_.size(); i++) {
+    for (const auto& val : local_input_[i]) {
+      local_mins[i] = std::min(local_mins[i], val);
+    }
+  }
+
+  if (world.rank() == 0) {
+    int c_ind = 0;
+    std::copy(local_mins.begin(), local_mins.end(), res_.begin());
+    c_ind += local_mins.size();
+
+    for (int proc = 1; proc < world.size(); proc++) {
+      int local_sz;
+      world.recv(proc, 0, &local_sz, 1);
+      std::vector<int> loc_res_(local_sz);
+      world.recv(proc, 0, loc_res_.data(), local_sz);
+      std::copy(loc_res_.begin(), loc_res_.end(), res_.data() + c_ind);
+      c_ind += loc_res_.size();
+    }
+  } else {
+    int loc_res__size = (int)local_mins.size();
+    world.send(0, 0, &loc_res__size, 1);
+    world.send(0, 0, local_mins.data(), loc_res__size);
+  }
+
+  return true;
+}
+
+bool shlyakov_m_min_value_of_row_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
+    std::copy(res_.begin(), res_.end(), output_matrix);
+  }
+
+  return true;
+}
+
+std::vector<std::vector<int>> shlyakov_m_min_value_of_row_mpi::TestMPITaskSequential::get_random_matr(int sz_row,
+                                                                                                      int sz_col) {
+  // a row buffer must hold sz_col values, not sz_row: otherwise `row = rand_vec` resizes
+  // every generated row to sz_row elements and non-square matrices get the wrong width
+  std::vector<int> rand_vec(sz_col);
+  std::vector<std::vector<int>> rand_matr(sz_row, std::vector<int>(sz_col));
+
+  for (auto& row : rand_matr) {
+    for (auto& el : rand_vec) el = std::rand() % 1001 - 500;
+    row = rand_vec;
+    // plant INT_MIN in every row so tests can assert the expected minimum
+    row[std::rand() % sz_col] = INT_MIN;
+  }
+
+  return rand_matr;
+}
\ No newline at end of file
diff --git a/tasks/seq/shlyakov_m_min_value_of_row/func_tests/main.cpp b/tasks/seq/shlyakov_m_min_value_of_row/func_tests/main.cpp
new file mode 100644
index 00000000000..453aa0fe4bf
--- /dev/null
+++ b/tasks/seq/shlyakov_m_min_value_of_row/func_tests/main.cpp
@@ -0,0 +1,217 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp"
+
+TEST(shlyakov_m_min_value_of_row_seq, test_validation) {
+  const int sz_row = 100;
+  const int sz_col = 100;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+
+  shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<std::vector<int>> rand_matr =
+      shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row,
sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_pre_processing) { + const int sz_row = 100; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_run) { + const int sz_row = 100; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + ASSERT_TRUE(testTaskSequential.run()); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_post_processing) { + const int sz_row = 100; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + ASSERT_TRUE(testTaskSequential.post_processing()); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_eq_result_square_matr) { + const int sz_row = 100; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + 
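+    // one input pointer per row; pre_processing() copies the rows into input_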
taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + for (int i = 0; i < sz_row; i++) { + ASSERT_EQ(result_vex[i], INT_MIN); + } +} + +TEST(shlyakov_m_min_value_of_row_seq, test_eq_result_notsquare_matr) { + const int sz_row = 150; + const int sz_col = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + for (int i = 0; i < sz_row; i++) { + ASSERT_EQ(result_vex[i], INT_MIN); + } +} + +TEST(shlyakov_m_min_value_of_row_seq, test_validation_uncorrect_input) { + const int sz_row = 0; + const int sz_col = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> rand_matr = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : rand_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector result_vex(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result_vex.data())); + taskDataSeq->outputs_count.emplace_back(result_vex.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_then_input_sz_not_eq_output_sz) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + shlyakov_m_min_value_of_row_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows - 1, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} \ No newline at end of file diff --git a/tasks/seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp b/tasks/seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp new file mode 100644 index 00000000000..a4b2eff2a7a --- /dev/null +++ b/tasks/seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp @@ -0,0 +1,27 @@ +// Copyright 2023 
Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace shlyakov_m_min_value_of_row_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) { + std::srand(std::time(nullptr)); + } + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector> get_random_matr(int sz_row, int sz_col); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace shlyakov_m_min_value_of_row_seq \ No newline at end of file diff --git a/tasks/seq/shlyakov_m_min_value_of_row/perf_tests/main.cpp b/tasks/seq/shlyakov_m_min_value_of_row/perf_tests/main.cpp new file mode 100644 index 00000000000..7c729837e5a --- /dev/null +++ b/tasks/seq/shlyakov_m_min_value_of_row/perf_tests/main.cpp @@ -0,0 +1,88 @@ +// Copyright 2023 Nesterov Alexander + +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp" + +TEST(shlyakov_m_min_value_of_row_seq, test_pipeline_run) { + const int sz_row = 10000; + const int sz_col = 10000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : matrix_rnd) taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector v_res(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < sz_row; i++) ASSERT_EQ(v_res[i], INT_MIN); +} + +TEST(shlyakov_m_min_value_of_row_seq, test_task_run) { + const int sz_row = 10000; + const int sz_col = 10000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(sz_row, sz_col); + + for (auto& row : matrix_rnd) taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataSeq->inputs_count.emplace_back(sz_row); + taskDataSeq->inputs_count.emplace_back(sz_col); + + std::vector v_res(sz_row, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto 
duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  for (int i = 0; i < sz_row; i++) ASSERT_EQ(v_res[i], INT_MIN);
+}
\ No newline at end of file
diff --git a/tasks/seq/shlyakov_m_min_value_of_row/src/ops_seq.cpp b/tasks/seq/shlyakov_m_min_value_of_row/src/ops_seq.cpp
new file mode 100644
index 00000000000..ac26f143ddd
--- /dev/null
+++ b/tasks/seq/shlyakov_m_min_value_of_row/src/ops_seq.cpp
@@ -0,0 +1,79 @@
+// Copyright 2024 Nesterov Alexander
+// shlyakov_m_min_value_of_row
+#include "seq/shlyakov_m_min_value_of_row/include/ops_seq.hpp"
+
+#include <climits>
+#include <cstdlib>
+
+bool shlyakov_m_min_value_of_row_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  size_t sz_row = taskData->inputs_count[0];
+  size_t sz_col = taskData->inputs_count[1];
+  input_.resize(sz_row, std::vector<int>(sz_col));
+
+  for (size_t i = 0; i < sz_row; i++) {
+    auto* matr = reinterpret_cast<int*>(taskData->inputs[i]);
+    for (size_t j = 0; j < sz_col; j++) {
+      input_[i][j] = matr[j];
+    }
+  }
+  res_.resize(sz_row);
+
+  return true;
+}
+
+bool shlyakov_m_min_value_of_row_seq::TestTaskSequential::validation() {
+  internal_order_test();
+
+  if (((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+       (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0)) &&
+      (taskData->outputs_count[0] == taskData->inputs_count[0]))
+    return true;
+
+  return false;
+}
+
+bool shlyakov_m_min_value_of_row_seq::TestTaskSequential::run() {
+  internal_order_test();
+  int min;
+  size_t sz_row = input_.size();
+  size_t sz_col = input_[0].size();
+
+  for (size_t i = 0; i < sz_row; i++) {
+    min = input_[i][0];
+    for (size_t j = 1; j < sz_col; j++) {
+      if (input_[i][j] < min) {
+        min = input_[i][j];
+      }
+    }
+    res_[i] = min;
+  }
+
+  return true;
+}
+
+bool shlyakov_m_min_value_of_row_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  int* result = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (size_t i = 0; i < res_.size(); i++) {
+    result[i] = res_[i];
+  }
+
+  return true;
+}
+
+std::vector<std::vector<int>> shlyakov_m_min_value_of_row_seq::TestTaskSequential::get_random_matr(int sz_row,
+                                                                                                   int sz_col) {
+  // same fix as the MPI helper: a row buffer must hold sz_col values, not sz_row
+  std::vector<int> rand_vec(sz_col);
+  std::vector<std::vector<int>> rand_matr(sz_row, std::vector<int>(sz_col));
+
+  for (auto& row : rand_matr) {
+    for (auto& el : rand_vec) el = std::rand() % 1001 - 500;
+    row = rand_vec;
+    row[std::rand() % sz_col] = INT_MIN;
+  }
+
+  return rand_matr;
+}
\ No newline at end of file
From 2eb016eb64efc85c7ed870929c9ac7b2618287ab Mon Sep 17 00:00:00 2001
From: Maksim Savchenko <113035529+MaxikGuy@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:31:27 +0300
Subject: [PATCH 087/155] =?UTF-8?q?=D0=A1=D0=B0=D0=B2=D1=87=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=BA=D0=BE=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC.=20=D0=97?=
 =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?=
 =?UTF-8?q?=D0=B0=D0=BD=D1=82=2014.=20=D0=9C=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0?=
 =?UTF-8?q?=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82?=
 =?UTF-8?q?=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#?=
 =?UTF-8?q?110)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Description of the sequential solution:**
- The matrix is stored as a one-dimensional vector with rows*columns elements in total.
- The matrix elements are traversed sequentially, and the minimum value is found along the way.

**Description of the MPI solution:**
- The matrix is stored as a one-dimensional vector with rows*columns elements in total.
- The matrix is split into parts according to the number of processes.
- Each process receives its own segment of the data and searches for the minimum value within it.
- The final minimum is computed with a reduce operation, which goes over the intermediate results of all processes and picks the smallest of them.
---
 .../func_tests/main_savchenko.cpp             | 525 ++++++++++++++++++
 .../include/ops_mpi_savchenko.hpp             |  48 ++
 .../perf_tests/main_savchenko.cpp             | 133 +++++
 .../src/ops_mpi_savchenko.cpp                 | 108 ++++
 .../func_tests/main_savchenko.cpp             | 389 +++++++++++++
 .../include/ops_seq_savchenko.hpp             |  25 +
 .../perf_tests/main_savchenko.cpp             | 124 +++++
 .../src/ops_seq_savchenko.cpp                 |  42 ++
 8 files changed, 1394 insertions(+)
 create mode 100644 tasks/mpi/savchenko_m_min_matrix/func_tests/main_savchenko.cpp
 create mode 100644 tasks/mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp
 create mode 100644 tasks/mpi/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp
 create mode 100644 tasks/mpi/savchenko_m_min_matrix/src/ops_mpi_savchenko.cpp
 create mode 100644 tasks/seq/savchenko_m_min_matrix/func_tests/main_savchenko.cpp
 create mode 100644 tasks/seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp
 create mode 100644 tasks/seq/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp
 create mode 100644 tasks/seq/savchenko_m_min_matrix/src/ops_seq_savchenko.cpp

diff --git a/tasks/mpi/savchenko_m_min_matrix/func_tests/main_savchenko.cpp b/tasks/mpi/savchenko_m_min_matrix/func_tests/main_savchenko.cpp
new file mode 100644
index 00000000000..b3b408de9d5
--- /dev/null
+++ b/tasks/mpi/savchenko_m_min_matrix/func_tests/main_savchenko.cpp
@@ -0,0 +1,525 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <climits>
+#include <random>
+#include <vector>
+
+#include "mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp"
+
+std::vector<int> getRandomMatrix(size_t rows, size_t columns, int min, int max) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+
+  // Forming a random matrix
+  std::vector<int> matrix(rows * columns);
+  for (size_t i = 0; i < rows; i++) {
+    for (size_t j = 0; j < columns; j++) {
+      matrix[i * columns + j] = min + gen() % (max - min + 1);
+    }
+  }
+
+  return matrix;
+}
+
+TEST(savchenko_m_min_matrix_mpi, test_min_10x10) {
+  const int rows = 10;
+  const int columns = 10;
+  const int gen_min = -1000;
+  const int gen_max = 1000;
+
+  boost::mpi::communicator world;
+  std::vector<int> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->inputs_count.emplace_back(columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
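+  // run the parallel task, then build a sequential reference on rank 0 for comparison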
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_100x10) { + const int rows = 100; + const int columns = 10; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_10x100) { + const int rows = 10; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel 
testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_100x100) { + const int rows = 100; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_100x1) { + const int rows = 100; + const int columns = 1; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + 
taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_1000x1) { + const int rows = 1000; + const int columns = 1; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_1x100) { + const int rows = 1; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_1x1000) { + const int rows = 1; + const int columns = 1000; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, INT_MAX); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + savchenko_m_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_0x0) { + const int rows = 0; + const int columns = 0; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + 
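+    // the zero dimensions recorded below must make validation() fail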
taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + if (world.rank() == 0) { + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_0x10) { + const int rows = 0; + const int columns = 10; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + if (world.rank() == 0) { + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_min_10x0) { + const int rows = 10; + const int columns = 0; + const int gen_min = -1000; + const int gen_max = 1000; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + savchenko_m_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + if (world.rank() == 0) { + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp b/tasks/mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp new file mode 100644 index 00000000000..6b2ff0576e2 --- /dev/null +++ b/tasks/mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace savchenko_m_min_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix; + int res{}; + size_t rows, columns; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix, local_matrix; + int res, local_res; + size_t rows, columns; + + boost::mpi::communicator world; 
+}; + +} // namespace savchenko_m_min_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp b/tasks/mpi/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp new file mode 100644 index 00000000000..2ec50d3612d --- /dev/null +++ b/tasks/mpi/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp @@ -0,0 +1,133 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp" + +std::vector getRandomMatrix(size_t rows, size_t columns, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + + // Forming a random matrix + std::vector matrix(rows * columns); + for (size_t i = 0; i < rows; i++) { + for (size_t j = 0; j < columns; j++) { + matrix[i * columns + j] = min + gen() % (max - min + 1); + } + } + + return matrix; +} + +TEST(savchenko_m_min_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int rows = 5000; + const int columns = 5000; + const int gen_min = -1000; + const int gen_max = 1000; + + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + int index = gen() % (rows * columns); + global_matrix[index] = ref; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_min[0]); + } +} + +TEST(savchenko_m_min_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, INT_MAX); + int ref = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int rows = 5000; + const int columns = 5000; + const int gen_min = -1000; + const int gen_max = 1000; + + global_matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + int index = gen() % (rows * columns); + global_matrix[index] = ref; + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto 
testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_sum[0]); + } +} diff --git a/tasks/mpi/savchenko_m_min_matrix/src/ops_mpi_savchenko.cpp b/tasks/mpi/savchenko_m_min_matrix/src/ops_mpi_savchenko.cpp new file mode 100644 index 00000000000..c94a526709b --- /dev/null +++ b/tasks/mpi/savchenko_m_min_matrix/src/ops_mpi_savchenko.cpp @@ -0,0 +1,108 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/savchenko_m_min_matrix/include/ops_mpi_savchenko.hpp" + +#include +#include +#include +#include + +// Task Sequential + +bool savchenko_m_min_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + rows = taskData->inputs_count[0]; + columns = taskData->inputs_count[1]; + matrix = std::vector(rows * columns); + + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + rows * columns, matrix.begin()); + + // Init value for output + res = INT_MAX; + return true; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + res = *std::min_element(matrix.begin(), matrix.end()); + return true; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +// Task Parallel + +bool savchenko_m_min_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + // Init matrix + rows = taskData->inputs_count[0]; + columns = taskData->inputs_count[1]; + matrix = std::vector(rows * columns); + + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + rows * columns, matrix.begin()); + } + + // Init value for output + res = INT_MAX; + return true; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; + } + return true; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, matrix.data() + delta * proc, delta); + } + } + + local_matrix = std::vector(delta); + if (world.rank() == 0) { + local_matrix = std::vector(matrix.begin(), matrix.begin() + delta); + } else { + 
world.recv(0, 0, local_matrix.data(), delta); + } + + local_res = *std::min_element(local_matrix.begin(), local_matrix.end()); + reduce(world, local_res, res, boost::mpi::minimum(), 0); + + return true; +} + +bool savchenko_m_min_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/savchenko_m_min_matrix/func_tests/main_savchenko.cpp b/tasks/seq/savchenko_m_min_matrix/func_tests/main_savchenko.cpp new file mode 100644 index 00000000000..d354ea3cf86 --- /dev/null +++ b/tasks/seq/savchenko_m_min_matrix/func_tests/main_savchenko.cpp @@ -0,0 +1,389 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp" + +std::vector getRandomMatrix(size_t rows, size_t columns, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + + // Forming a random matrix + std::vector matrix(rows * columns); + for (size_t i = 0; i < rows; i++) { + for (size_t j = 0; j < columns; j++) { + matrix[i * columns + j] = min + gen() % (max - min + 1); + } + } + + return matrix; +} + +TEST(savchenko_m_min_matrix_seq, test_min_10x10) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 10; + const int columns = 10; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_100x10) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 100; + const int columns = 10; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, 
test_min_10x100) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 10; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_100x100) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 100; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_100x1) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 100; + const int columns = 1; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_1000x1) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + 
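+  // The test plants INT_MIN at a random position so the expected minimum
+  // is known in advance, whatever the other random values are.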
const int rows = 1000; + const int columns = 1; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_1x100) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 1; + const int columns = 100; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_1x1000) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + const int rows = 1; + const int columns = 1000; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + const int index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_min_0x0) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + // Create data + const int rows = 0; + const int columns = 0; + const int gen_min = -1000; + const int gen_max = 1000; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + + // Create TaskData + std::shared_ptr taskDataSeq 
= std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(savchenko_m_min_matrix_seq, test_min_0x10) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + // Create data + const int rows = 0; + const int columns = 10; + const int gen_min = -1000; + const int gen_max = 1000; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(savchenko_m_min_matrix_seq, test_min_10x0) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + // Create data + const int rows = 10; + const int columns = 0; + const int gen_min = -1000; + const int gen_max = 1000; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + savchenko_m_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} \ No newline at end of file diff --git a/tasks/seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp b/tasks/seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp new file mode 100644 index 00000000000..8ed289c12cf --- /dev/null +++ b/tasks/seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace savchenko_m_min_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix{}; + int res{}; + size_t rows, columns; +}; + +} // namespace savchenko_m_min_matrix_seq \ No newline at end of file diff --git a/tasks/seq/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp b/tasks/seq/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp new file mode 100644 index 00000000000..d271e4f5eb6 --- /dev/null +++ b/tasks/seq/savchenko_m_min_matrix/perf_tests/main_savchenko.cpp @@ -0,0 +1,124 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp" + +std::vector 
getRandomMatrix(size_t rows, size_t columns, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + + // Forming a random matrix + std::vector matrix(rows * columns); + for (size_t i = 0; i < rows; i++) { + for (size_t j = 0; j < columns; j++) { + matrix[i * columns + j] = min + gen() % (max - min + 1); + } + } + + return matrix; +} + +TEST(savchenko_m_min_matrix_seq, test_pipeline_run) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + std::shared_ptr taskDataSeq = std::make_shared(); + const size_t rows = 5000; + const size_t columns = 5000; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + size_t index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, min_value[0]); +} + +TEST(savchenko_m_min_matrix_seq, test_task_run) { + std::vector matrix; + std::vector min_value(1, INT_MAX); + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create data + std::shared_ptr taskDataSeq = std::make_shared(); + const size_t rows = 5000; + const size_t columns = 5000; + const int gen_min = -1000; + const int gen_max = 1000; + const int ref = INT_MIN; + + matrix = getRandomMatrix(rows, columns, gen_min, gen_max); + size_t index = gen() % (rows * columns); + matrix[index] = INT_MIN; + + // Create TaskData + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(min_value.data())); + taskDataSeq->outputs_count.emplace_back(min_value.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + 
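+  // After the timed runs, verify that the planted INT_MIN was found.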
ASSERT_EQ(ref, min_value[0]);
+}
diff --git a/tasks/seq/savchenko_m_min_matrix/src/ops_seq_savchenko.cpp b/tasks/seq/savchenko_m_min_matrix/src/ops_seq_savchenko.cpp
new file mode 100644
index 00000000000..eeffe8fa8b5
--- /dev/null
+++ b/tasks/seq/savchenko_m_min_matrix/src/ops_seq_savchenko.cpp
@@ -0,0 +1,42 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/savchenko_m_min_matrix/include/ops_seq_savchenko.hpp"
+
+bool savchenko_m_min_matrix_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // Both matrix dimensions must be positive and exactly one output is expected
+  return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool savchenko_m_min_matrix_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init input matrix; the tests push the row count first, then the column count
+  rows = taskData->inputs_count[0];
+  columns = taskData->inputs_count[1];
+  matrix = std::vector<int>(rows * columns);
+
+  auto *tmp = reinterpret_cast<int *>(taskData->inputs[0]);
+  for (size_t i = 0; i < rows; i++) {
+    for (size_t j = 0; j < columns; ++j) {
+      matrix[i * columns + j] = tmp[i * columns + j];
+    }
+  }
+  res = matrix[0];
+
+  return true;
+}
+
+bool savchenko_m_min_matrix_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < matrix.size(); i++) {
+    if (matrix[i] < res) {
+      res = matrix[i];
+    }
+  }
+  return true;
+}
+
+bool savchenko_m_min_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = res;
+  return true;
+}

From e3cd200b4ed4cb1f177d91d94c7b149f87e06e6c Mon Sep 17 00:00:00 2001
From: Emil <72946664+emilien-gus@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:33:23 +0300
Subject: [PATCH 088/155] =?UTF-8?q?=D0=93=D1=83=D1=81=D0=B5=D0=B9=D0=BD?=
 =?UTF-8?q?=D0=BE=D0=B2=20=D0=AD=D0=BC=D0=B8=D0=BB.=20=D0=97=D0=B0=D0=B4?=
 =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?=
 =?UTF-8?q?=D1=82=2026.=20=D0=9F=D1=80=D0=BE=D0=B2=D0=B5=D1=80=D0=BA=D0=B0?=
 =?UTF-8?q?=20=D0=BB=D0=B5=D0=BA=D1=81=D0=B8=D0=BA=D0=BE=D0=B3=D1=80=D0=B0?=
 =?UTF-8?q?=D1=84=D0=B8=D1=87=D0=B5=D1=81=D0=BA=D0=BE=D0=B9=20=D1=83=D0=BF?=
 =?UTF-8?q?=D0=BE=D1=80=D1=8F=D0=B4=D0=BE=D1=87=D0=B5=D0=BD=D0=BD=D0=BE?=
 =?UTF-8?q?=D1=81=D1=82=D0=B8=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82?=
 =?UTF-8?q?=D1=80=D0=BE=D0=BA.=20(#114)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential solution: compare the characters at matching indices in the two
words. If the i-th characters differ, the first word is greater when its
character is lexicographically greater than the character of the second word;
otherwise the first word is smaller. We walk through the words in a loop until
the end of the shorter word is reached. If a mismatching character is found,
the result is recorded and the loop exits. If the loop completes without a
mismatch, three outcomes are possible: the words are equal (their lengths
match); the first word is a prefix of the second (the first word is smaller);
or the second word is a prefix of the first (the first word is greater).

Parallel solution: each process receives its own segments of the words and
checks them in a loop; the per-segment results are then gathered into an array
ordered by process rank. Scanning this array, we look for a local result that
reports a difference inside its segment. If such a result is found, the search
stops and that result is saved. If all segments match, we check whether one
word is a prefix of the other, which is done by comparing the word lengths.
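For reference, the sequential comparison described above reduces to the
following minimal standalone sketch; lex_order is a hypothetical helper name,
and the 0/1/2 result codes follow the convention used by the task (0 - equal,
1 - first word smaller, 2 - first word greater):

#include <cstddef>
#include <vector>

// Hypothetical helper, not part of the patch: returns 0 if the words are
// equal, 1 if a precedes b lexicographically, 2 if a follows b.
int lex_order(const std::vector<char> &a, const std::vector<char> &b) {
  size_t n = a.size() < b.size() ? a.size() : b.size();
  for (size_t i = 0; i < n; i++) {
    if (a[i] < b[i]) return 1;  // first mismatch decides the order
    if (a[i] > b[i]) return 2;
  }
  if (a.size() == b.size()) return 0;  // identical words
  return a.size() < b.size() ? 1 : 2;  // the shorter word is a proper prefix
}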
---
 .../func_tests/main.cpp | 448 ++++++++++++++++++
 .../include/ops_mpi.hpp |  43 ++
 .../perf_tests/main.cpp |  91 ++++
 .../src/ops_mpi.cpp     | 154 ++++++
 .../func_tests/main.cpp | 196 ++++++++
 .../include/ops_seq.hpp |  23 +
 .../perf_tests/main.cpp |  85 ++++
 .../src/ops_seq.cpp     |  50 ++
 8 files changed, 1090 insertions(+)
 create mode 100644 tasks/mpi/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp
 create mode 100644 tasks/mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp
 create mode 100644 tasks/mpi/guseynov_e_check_lex_order_of_two_string/src/ops_mpi.cpp
 create mode 100644 tasks/seq/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp
 create mode 100644 tasks/seq/guseynov_e_check_lex_order_of_two_string/include/ops_seq.hpp
 create mode 100644 tasks/seq/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp
 create mode 100644 tasks/seq/guseynov_e_check_lex_order_of_two_string/src/ops_seq.cpp

diff --git a/tasks/mpi/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp
new file mode 100644
index 00000000000..7a4ad0129b6
--- /dev/null
+++ b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp
@@ -0,0 +1,448 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp"
+
+TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_empty_strings) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<char>> global_vec(2);
+  std::vector<int> global_res(1, -1);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec[0].data()));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec[1].data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->inputs_count.emplace_back(global_vec[0].size());
+    taskDataPar->inputs_count.emplace_back(global_vec[1].size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_res.data()));
+    taskDataPar->outputs_count.emplace_back(global_res.size());
+  }
+
+  guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar);
+  ASSERT_EQ(testMPITaskParallel.validation(), true);
+  testMPITaskParallel.pre_processing();
+  testMPITaskParallel.run();
+  testMPITaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_res(1, -1);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec[0].data()));
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec[1].data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->inputs_count.emplace_back(global_vec[0].size());
+    taskDataSeq->inputs_count.emplace_back(global_vec[1].size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_res.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_res.size());
+
+    // create Task
+    guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq);
+    ASSERT_EQ(testMPITaskSequantial.validation(), true);
+    testMPITaskSequantial.pre_processing();
+    testMPITaskSequantial.run();
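+    // post_processing() copies the sequential result into reference_res,
+    // which is then compared against the parallel result below.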
testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_first_string_is_empty) { + boost::mpi::communicator world; + std::vector> global_vec(2); + global_vec[1] = std::vector(120, 'a'); + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_second_string_is_empty) { + boost::mpi::communicator world; + std::vector> global_vec(2); + global_vec[0] = std::vector(120, 'a'); + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + 
taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_equal_words) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(120, 'a')); + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_second_string_is_greater) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(240, 'a')); + global_vec[1][239] = 'b'; + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + 
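+  // Every rank constructs and runs the parallel task; only rank 0 filled in
+  // the task data, the remaining ranks receive their segments inside run().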
guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_first_string_is_greater) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(240, 'a')); + global_vec[0][0] = 'b'; + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_first_string_is_prefix) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(360, 'a')); + global_vec[1].push_back('b'); + std::vector 
global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_second_string_is_prefix) { + boost::mpi::communicator world; + std::vector> global_vec(2); + global_vec[0].push_back('b'); + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + 
taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, Test_random_strings) { + boost::mpi::communicator world; + const int vector_size = 520; + std::vector> global_vec(2); + global_vec[0] = guseynov_e_check_lex_order_of_two_string_mpi::getRandomVector(vector_size); + global_vec[1] = guseynov_e_check_lex_order_of_two_string_mpi::getRandomVector(vector_size); + std::vector global_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, -1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential testMPITaskSequantial(taskDataSeq); + ASSERT_EQ(testMPITaskSequantial.validation(), true); + testMPITaskSequantial.pre_processing(); + testMPITaskSequantial.run(); + testMPITaskSequantial.post_processing(); + ASSERT_EQ(reference_res[0], global_res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp new file mode 100644 index 00000000000..c22d16e5d69 --- /dev/null +++ b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp @@ -0,0 +1,43 @@ +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace guseynov_e_check_lex_order_of_two_string_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +class TestMPITaskParallel 
: public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input_1_, local_input_2_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace guseynov_e_check_lex_order_of_two_string_mpi \ No newline at end of file diff --git a/tasks/mpi/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp new file mode 100644 index 00000000000..5b4fba9ad1e --- /dev/null +++ b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp" + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(25000000, 'a')); + std::vector global_res(1, -1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto testMPITaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMPITaskParallel->validation(), true); + testMPITaskParallel->pre_processing(); + testMPITaskParallel->run(); + testMPITaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMPITaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_res[0]); + } +} + +TEST(guseynov_e_check_lex_order_of_two_string_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_vec(2, std::vector(25000000, 'a')); + std::vector global_res(1, -1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[1].data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto testMPITaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMPITaskParallel->validation(), true); + testMPITaskParallel->pre_processing(); + testMPITaskParallel->run(); + testMPITaskParallel->post_processing(); + + // Create Perf attributes + auto 
perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMPITaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/guseynov_e_check_lex_order_of_two_string/src/ops_mpi.cpp b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/src/ops_mpi.cpp new file mode 100644 index 00000000000..3a2606ddadc --- /dev/null +++ b/tasks/mpi/guseynov_e_check_lex_order_of_two_string/src/ops_mpi.cpp @@ -0,0 +1,154 @@ +#include "mpi/guseynov_e_check_lex_order_of_two_string/include/ops_mpi.hpp" + +#include +#include + +std::vector guseynov_e_check_lex_order_of_two_string_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = static_cast(gen() % (126 - 32 + 1) + 32); + } + return vec; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // init vectors + input_ = std::vector>(taskData->inputs_count[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i + 1]); + for (unsigned j = 0; j < taskData->inputs_count[i + 1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res_ = 0; + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // check count of words and count of elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential::run() { + internal_order_test(); + size_t min_string_len = std::min(input_[0].size(), input_[1].size()); + for (size_t i = 0; i < min_string_len; i++) { + if (input_[0][i] < input_[1][i]) { + res_ = 1; + break; + } + if (input_[0][i] > input_[1][i]) { + res_ = 2; + break; + } + } + if (res_ == 0 && input_[0].size() != input_[1].size()) { + if (input_[0].size() > input_[1].size()) { + res_ = 2; + } else { + res_ = 1; + } + } + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = std::min(taskData->inputs_count[1], taskData->inputs_count[2]) / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // init vectors + input_ = std::vector>(taskData->inputs_count[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); 
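+      // Copy raw input buffer i into its own char vector;
+      // inputs_count[i + 1] holds the length of string i.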
+ input_[i] = std::vector(taskData->inputs_count[i + 1]); + for (unsigned j = 0; j < taskData->inputs_count[i + 1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_[0].data() + delta * proc, delta); + world.send(proc, 1, input_[1].data() + delta * proc, delta); + } + } + local_input_1_ = std::vector(delta); + local_input_2_ = std::vector(delta); + if (world.rank() == 0) { + local_input_1_ = std::vector(input_[0].begin(), input_[0].begin() + delta); + local_input_2_ = std::vector(input_[1].begin(), input_[1].begin() + delta); + } else { + world.recv(0, 0, local_input_1_.data(), delta); + world.recv(0, 1, local_input_2_.data(), delta); + } + // Init value for output + res_ = 0; + + // Transfer data to processes + int local_res = 0; + for (size_t i = 0; i < local_input_1_.size(); i++) { + if (local_input_1_[i] < local_input_2_[i]) { + local_res = 1; + break; + } + if (local_input_1_[i] > local_input_2_[i]) { + local_res = 2; + break; + } + } + + std::vector gathered_data; + boost::mpi::gather(world, local_res, gathered_data, 0); + + if (world.rank() == 0) { + for (int proc = 0; proc < world.size(); proc++) { + if (gathered_data[proc] != 0) { + res_ = gathered_data[proc]; + break; + } + } + if (res_ == 0 && input_[0].size() != input_[1].size()) { + if (input_[0].size() > input_[1].size()) { + res_ = 2; + } else { + res_ = 1; + } + } + } + return true; +} + +bool guseynov_e_check_lex_order_of_two_string_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp b/tasks/seq/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp new file mode 100644 index 00000000000..5bd6af845c6 --- /dev/null +++ b/tasks/seq/guseynov_e_check_lex_order_of_two_string/func_tests/main.cpp @@ -0,0 +1,196 @@ +#include + +#include +#include + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_empty_strings) { + // create data + std::vector> in = {{}, {}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 0); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_first_string_is_empty) { + // create data + std::vector> in = {{}, {'c', 'a', 't'}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + 
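+  // inputs_count layout: [number of strings, length of string 0, length of string 1].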
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 1); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_second_string_is_empty) { + // create data + std::vector> in = {{'c', 'a', 't'}, {}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 2); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_equal_strings) { + // create data + std::vector> in = {{'c', 'a', 't'}, {'c', 'a', 't'}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 0); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_second_string_is_greater) { + // create data + std::vector> in = {{'a', 'p', 'p', 'l', 'e'}, {'b', 'a', 'n', 'a', 'n'}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 1); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_first_string_is_greater) { + // create data + std::vector> in = {{'d', 'o', 'g'}, {'c', 'a', 't'}}; + 
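+  // "dog" is greater than "cat" at the very first character,
+  // so the expected result code is 2.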
std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 2); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_first_string_is_prefix) { + // create data + std::vector> in = {{'a', 'b', 'c'}, {'a', 'b', 'c', 'd'}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 1); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, Test_second_string_is_prefix) { + // create data + std::vector> in = {{'a', 'b', 'c', 'd'}, {'a', 'b', 'c'}}; + std::vector out(1, -1); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential testTaskSequantial(taskDataSeq); + ASSERT_EQ(testTaskSequantial.validation(), true); + testTaskSequantial.pre_processing(); + testTaskSequantial.run(); + testTaskSequantial.post_processing(); + ASSERT_EQ(out[0], 2); +} \ No newline at end of file diff --git a/tasks/seq/guseynov_e_check_lex_order_of_two_string/include/ops_seq.hpp b/tasks/seq/guseynov_e_check_lex_order_of_two_string/include/ops_seq.hpp new file mode 100644 index 00000000000..93b46a16bc3 --- /dev/null +++ b/tasks/seq/guseynov_e_check_lex_order_of_two_string/include/ops_seq.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace guseynov_e_check_lex_order_of_two_string_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool 
post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace guseynov_e_check_lex_order_of_two_string_seq \ No newline at end of file diff --git a/tasks/seq/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp b/tasks/seq/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp new file mode 100644 index 00000000000..8d1b16381e8 --- /dev/null +++ b/tasks/seq/guseynov_e_check_lex_order_of_two_string/perf_tests/main.cpp @@ -0,0 +1,85 @@ +#include + +#include +#include +#include + +TEST(guseynov_e_check_lex_order_of_two_string_seq, test_pipeline_run) { + // create data + std::vector> in(2, std::vector(20000000, 'a')); + in[1].push_back('a'); + std::vector out(1, 0); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + auto testTaskSequantial = + std::make_shared(taskDataSeq); + + // Create perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequantial); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 1); +} + +TEST(guseynov_e_check_lex_order_of_two_string_seq, test_task_run) { + // create data + std::vector> in(2, std::vector(20000000, 'a')); + in[1].push_back('a'); + std::vector out(1, 0); + + // create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->inputs_count.emplace_back(in[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // create Task + auto testTaskSequantial = + std::make_shared(taskDataSeq); + + // Create perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequantial); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 1); +} diff --git a/tasks/seq/guseynov_e_check_lex_order_of_two_string/src/ops_seq.cpp 
b/tasks/seq/guseynov_e_check_lex_order_of_two_string/src/ops_seq.cpp
new file mode 100644
index 00000000000..71358236b18
--- /dev/null
+++ b/tasks/seq/guseynov_e_check_lex_order_of_two_string/src/ops_seq.cpp
@@ -0,0 +1,50 @@
+#include "seq/guseynov_e_check_lex_order_of_two_string/include/ops_seq.hpp"
+
+bool guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<std::vector<char>>(taskData->inputs_count[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    auto *tmp_ptr = reinterpret_cast<char *>(taskData->inputs[i]);
+    input_[i] = std::vector<char>(taskData->inputs_count[i + 1]);
+    for (unsigned j = 0; j < taskData->inputs_count[i + 1]; j++) {
+      input_[i][j] = tmp_ptr[j];
+    }
+  }
+
+  res_ = 0;
+  return true;
+}
+
+bool guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1;
+}
+
+bool guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential::run() {
+  internal_order_test();
+  size_t min_string_len = std::min(input_[0].size(), input_[1].size());
+  for (size_t i = 0; i < min_string_len; i++) {
+    if (input_[0][i] < input_[1][i]) {
+      res_ = 1;
+      break;
+    }
+    if (input_[0][i] > input_[1][i]) {
+      res_ = 2;
+      break;
+    }
+  }
+  if (res_ == 0 && input_[0].size() != input_[1].size()) {
+    if (input_[0].size() > input_[1].size()) {
+      res_ = 2;
+    } else {
+      res_ = 1;
+    }
+  }
+  return true;
+}
+
+bool guseynov_e_check_lex_order_of_two_string_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = res_;
+  return true;
+}
\ No newline at end of file

From 32c5dd9738bacc6e1bdd9faf33c01c29da15b177 Mon Sep 17 00:00:00 2001
From: Tanya Yasakova <113035646+Tanya-Yasakova@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:33:54 +0300
Subject: [PATCH 089/155] Yasakova Tatyana. Task 1. Variant 14. Minimum value
 of matrix elements. (#115)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential solution:
- sequential traversal of all matrix elements

Parallel solution:
- while reading the data from taskData, the matrix is flattened
  (joined into a single vector)
- the flattened matrix is split into segments by the number of
  processes, so that the data is distributed evenly
- each segment is sent to its own process
- each process finds the minimum in its segment of the data
- reduce goes over all processes and picks the minimum of the
  intermediate results (a minimal standalone sketch of this scheme is
  given after the first functional test below)
---
 .../func_tests/main_yasakova.cpp              | 190 ++++++++++++++++++
 .../include/ops_mpi_yasakova.hpp              |  42 ++++
 .../perf_tests/main.cpp                       | 106 ++++++++++
 .../src/ops_mpi_yasakova.cpp                  |  89 ++++++++
 .../func_tests/main_yasakova.cpp              | 161 +++++++++++++++
 .../include/ops_seq_yasakova.hpp              |  21 ++
 .../perf_tests/main_yasakova.cpp              |  98 +++++++++
 .../src/ops_seq_yasakova.cpp                  |  42 ++++
 8 files changed, 749 insertions(+)
 create mode 100644 tasks/mpi/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp
 create mode 100644
tasks/mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp create mode 100644 tasks/mpi/yasakova_t_min_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/yasakova_t_min_of_vector_elements/src/ops_mpi_yasakova.cpp create mode 100644 tasks/seq/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp create mode 100644 tasks/seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp create mode 100644 tasks/seq/yasakova_t_min_of_vector_elements/perf_tests/main_yasakova.cpp create mode 100644 tasks/seq/yasakova_t_min_of_vector_elements/src/ops_seq_yasakova.cpp diff --git a/tasks/mpi/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp b/tasks/mpi/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp new file mode 100644 index 00000000000..431a2fa8e48 --- /dev/null +++ b/tasks/mpi/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp @@ -0,0 +1,190 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp" + +std::vector RandomVector(int size, int minimum = 0, int maximum = 100) { + std::mt19937 gen; + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = minimum + gen() % (maximum - minimum + 1); + } + return vec; +} + +std::vector> RandomMatrix(int rows, int columns, int minimum = 0, int maximum = 100) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = RandomVector(columns, minimum, maximum); + } + return vec; +} + +TEST(yasakova_t_min_of_vector_elements_mpi, testFindMinimumIn10x10Matrix) { + const int count_rows = 10; + const int count_columns = 10; + const int gen_minimum = -500; + const int gen_maximum = 500; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + taskDataPar->outputs_count.emplace_back(global_minimum.size()); + } + yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_minimum(1, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_minimum.data())); + taskDataSeq->outputs_count.emplace_back(reference_minimum.size()); + yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_minimum[0], global_minimum[0]); + } +} + 
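The distribution scheme described in the commit message above reduces to four Boost.MPI calls. The sketch below (referenced from the commit message) is a minimal, self-contained illustration of the same flatten / block-distribute / local-minimum / reduce(minimum) pipeline; it is not part of the patch, and every name and size in it is invented for the example:

// Standalone sketch of the broadcast + send/recv + reduce(minimum) scheme.
// Illustrative only; sizes and the planted value are arbitrary.
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/operations.hpp>

#include <algorithm>
#include <climits>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int rows = 100;
  const int columns = 100;
  unsigned int delta = 0;
  std::vector<int> flat;  // the matrix joined into a single vector

  if (world.rank() == 0) {
    flat.assign(rows * columns, 42);  // stand-in for the real matrix data
    flat[1234] = -7;                  // plant a known minimum
    delta = flat.size() / world.size();
  }
  boost::mpi::broadcast(world, delta, 0);

  // Rank 0 keeps the first segment and sends one equal-sized segment
  // to every other process.
  std::vector<int> local(delta);
  if (world.rank() == 0) {
    for (int proc = 1; proc < world.size(); proc++) {
      world.send(proc, 0, flat.data() + proc * delta, static_cast<int>(delta));
    }
    std::copy(flat.begin(), flat.begin() + delta, local.begin());
  } else {
    world.recv(0, 0, local.data(), static_cast<int>(delta));
  }

  // Each process takes the minimum of its segment; reduce picks the
  // global minimum on rank 0.
  // Note: if flat.size() is not divisible by world.size(), the tail
  // beyond delta * world.size() would still need to be scanned.
  int local_min = local.empty() ? INT_MAX : *std::min_element(local.begin(), local.end());
  int global_min = INT_MAX;
  boost::mpi::reduce(world, local_min, global_min, boost::mpi::minimum<int>(), 0);
  return 0;
}

Built and launched like any Boost.MPI program (e.g. mpirun -np 4 ./min_sketch), only rank 0 ends up holding the global minimum, which matches how the parallel task below writes its result only on rank 0.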
+TEST(yasakova_t_min_of_vector_elements_mpi, testFindMinimumIn10x100Matrix) { + const int count_rows = 10; + const int count_columns = 100; + const int gen_minimum = -500; + const int gen_maximum = 500; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + taskDataPar->outputs_count.emplace_back(global_minimum.size()); + } + yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_minimum(1, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_minimum.data())); + taskDataSeq->outputs_count.emplace_back(reference_minimum.size()); + yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_minimum[0], global_minimum[0]); + } +} + +TEST(yasakova_t_min_of_vector_elements_mpi, testFindMinimumIn100x10Matrix) { + const int count_rows = 100; + const int count_columns = 10; + const int gen_minimum = -500; + const int gen_maximum = 500; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + taskDataPar->outputs_count.emplace_back(global_minimum.size()); + } + yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_minimum(1, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_minimum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_minimum.size());
+    yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+    ASSERT_EQ(reference_minimum[0], global_minimum[0]);
+  }
+}
+
+TEST(yasakova_t_min_of_vector_elements_mpi, testFindMinimumIn100x100Matrix) {
+  const int count_rows = 100;
+  const int count_columns = 100;
+  const int gen_minimum = -500;
+  const int gen_maximum = 500;
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int32_t> global_minimum(1, INT_MAX);
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_minimum.data()));
+    taskDataPar->outputs_count.emplace_back(global_minimum.size());
+  }
+  yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int32_t> reference_minimum(1, INT_MAX);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataSeq->inputs_count.emplace_back(count_rows);
+    taskDataSeq->inputs_count.emplace_back(count_columns);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_minimum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_minimum.size());
+    yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+    ASSERT_EQ(reference_minimum[0], global_minimum[0]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp b/tasks/mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp
new file mode 100644
index 00000000000..ed439bb82bc
--- /dev/null
+++ b/tasks/mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp
@@ -0,0 +1,42 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace yasakova_t_min_of_vector_elements_mpi {
+
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<std::vector<int>> inputValues_;
+  int res_{};
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector inputValues_, localInputValues_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace yasakova_t_min_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/yasakova_t_min_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/yasakova_t_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..58a51455b41 --- /dev/null +++ b/tasks/mpi/yasakova_t_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp" + +std::vector RandomVector(int size, int minimum = 0, int maximum = 100) { + std::mt19937 gen; + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = minimum + gen() % (maximum - minimum + 1); + } + return vec; +} + +std::vector> RandomMatrix(int rows, int columns, int minimum = 0, int maximum = 100) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = RandomVector(columns, minimum, maximum); + } + return vec; +} + +TEST(yasakova_t_min_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + int ref = INT_MIN; + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_minimum = -500; + int gen_maximum = 500; + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + std::mt19937 gen; + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + if (world.rank() == 0) { + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + taskDataPar->outputs_count.emplace_back(global_minimum.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_minimum[0]); + } +} + +TEST(yasakova_t_min_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_sum(1, INT_MAX); + int ref = INT_MIN; + std::shared_ptr taskDataPar = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_minimum = -500; + int gen_maximum = 500; + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + std::mt19937 gen; + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + if (world.rank() == 0) { + for (unsigned int i = 0; i < 
global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_sum[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/yasakova_t_min_of_vector_elements/src/ops_mpi_yasakova.cpp b/tasks/mpi/yasakova_t_min_of_vector_elements/src/ops_mpi_yasakova.cpp new file mode 100644 index 00000000000..b7dfe5b99ed --- /dev/null +++ b/tasks/mpi/yasakova_t_min_of_vector_elements/src/ops_mpi_yasakova.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/yasakova_t_min_of_vector_elements/include/ops_mpi_yasakova.hpp" + +#include +#include +#include + +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + inputValues_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], inputValues_[i].begin()); + } + res_ = INT_MAX; + return true; +} + +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; +} + +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + std::vector local_res(inputValues_.size()); + for (unsigned int i = 0; i < inputValues_.size(); i++) { + local_res[i] = *std::min_element(inputValues_[i].begin(), inputValues_[i].end()); + } + res_ = *std::min_element(local_res.begin(), local_res.end()); + return true; +} + +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + res_ = INT_MAX; + return true; +} +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1 && !taskData->inputs.empty(); + } + return true; +} +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size(); + } + broadcast(world, delta, 0); + if (world.rank() == 0) { + unsigned int rows = taskData->inputs_count[0]; + unsigned int columns = taskData->inputs_count[1]; + inputValues_ = 
std::vector(rows * columns); + for (unsigned int i = 0; i < rows; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < columns; j++) { + inputValues_[i * columns + j] = tmp_ptr[j]; + } + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, inputValues_.data() + delta * proc, delta); + } + } + localInputValues_ = std::vector(delta); + if (world.rank() == 0) { + localInputValues_ = std::vector(inputValues_.begin(), inputValues_.begin() + delta); + } else { + world.recv(0, 0, localInputValues_.data(), delta); + } + int local_res = *std::min_element(localInputValues_.begin(), localInputValues_.end()); + reduce(world, local_res, res_, boost::mpi::minimum(), 0); + return true; +} +bool yasakova_t_min_of_vector_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp b/tasks/seq/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp new file mode 100644 index 00000000000..594cfe74b75 --- /dev/null +++ b/tasks/seq/yasakova_t_min_of_vector_elements/func_tests/main_yasakova.cpp @@ -0,0 +1,161 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp" + +std::vector RandomVector(int size, int minimum = 0, int maximum = 100) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = minimum + gen() % (maximum - minimum + 1); + } + return vec; +} + +std::vector> RandomMatrix(int rows, int columns, int minimum = 0, int maximum = 100) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = RandomVector(columns, minimum, maximum); + } + return vec; +} + +TEST(yasakova_t_min_of_vector_elements_seq, testFindMinimumInMatrixWithOneRow) { + std::random_device dev; + std::mt19937 gen(dev()); + const int count_rows = 1; + const int count_columns = 10; + const int gen_minimum = -500; + const int gen_maximum = 500; + int ref = INT_MIN; + std::vector out(1, INT_MAX); + std::vector> in = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % count_columns; + in[0][index] = ref; + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + yasakova_t_min_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, out[0]); +} + +TEST(yasakova_t_min_of_vector_elements_seq, testFindMinimumIn10x10Matrix) { + std::random_device dev; + std::mt19937 gen(dev()); + const int count_rows = 10; + const int count_columns = 10; + const int gen_minimum = -500; + const int gen_maximum = 500; + int ref = INT_MIN; + std::vector out(1, INT_MAX); + std::vector> in = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); + 
in[index / count_columns][index / count_rows] = ref; + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + yasakova_t_min_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, out[0]); +} + +TEST(yasakova_t_min_of_vector_elements_seq, testFindMinimumIn10x100Matrix) { + std::random_device dev; + std::mt19937 gen(dev()); + const int count_rows = 10; + const int count_columns = 100; + const int gen_minimum = -500; + const int gen_maximum = 500; + int ref = INT_MIN; + std::vector out(1, INT_MAX); + std::vector> in = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + yasakova_t_min_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, out[0]); +} + +TEST(yasakova_t_min_of_vector_elements_seq, testFindMinimumIn100x10Matrix) { + std::random_device dev; + std::mt19937 gen(dev()); + const int count_rows = 100; + const int count_columns = 10; + const int gen_minimum = -500; + const int gen_maximum = 500; + int ref = INT_MIN; + std::vector out(1, INT_MAX); + std::vector> in = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + yasakova_t_min_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, out[0]); +} + +TEST(yasakova_t_min_of_vector_elements_seq, testFindMinimumIn100x100Matrix) { + std::random_device dev; + std::mt19937 gen(dev()); + const int count_rows = 100; + const int count_columns = 100; + const int gen_minimum = -500; + const int gen_maximum = 500; + int ref = INT_MIN; + std::vector out(1, INT_MAX); + std::vector> in = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); 
+ in[index / count_columns][index / count_rows] = ref; + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + yasakova_t_min_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ref, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp b/tasks/seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp new file mode 100644 index 00000000000..12dd6f1d820 --- /dev/null +++ b/tasks/seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp @@ -0,0 +1,21 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace yasakova_t_min_of_vector_elements_seq { +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; +} // namespace yasakova_t_min_of_vector_elements_seq diff --git a/tasks/seq/yasakova_t_min_of_vector_elements/perf_tests/main_yasakova.cpp b/tasks/seq/yasakova_t_min_of_vector_elements/perf_tests/main_yasakova.cpp new file mode 100644 index 00000000000..9c7180f157d --- /dev/null +++ b/tasks/seq/yasakova_t_min_of_vector_elements/perf_tests/main_yasakova.cpp @@ -0,0 +1,98 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp" + +std::vector RandomVector(int size, int minimum = 0, int maximum = 100) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for (int i = 0; i < size; i++) { + vec[i] = minimum + gen() % (maximum - minimum + 1); + } + return vec; +} + +std::vector> RandomMatrix(int rows, int columns, int minimum = 0, int maximum = 100) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = RandomVector(columns, minimum, maximum); + } + return vec; +} + +TEST(yasakova_t_min_of_vector_elements_seq, test_pipeline_run) { + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + int ref = INT_MIN; + std::random_device dev; + std::mt19937 gen(dev()); + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_minimum = -500; + int gen_maximum = 500; + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + 
taskDataSeq->outputs_count.emplace_back(global_minimum.size()); + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_minimum[0]); +} + +TEST(yasakova_t_min_of_vector_elements_seq, test_task_run) { + std::vector> global_matrix; + std::vector global_minimum(1, INT_MAX); + int ref = INT_MIN; + std::random_device dev; + std::mt19937 gen(dev()); + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_minimum = -500; + int gen_maximum = 500; + global_matrix = RandomMatrix(count_rows, count_columns, gen_minimum, gen_maximum); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_minimum.data())); + taskDataSeq->outputs_count.emplace_back(global_minimum.size()); + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_minimum[0]); +} \ No newline at end of file diff --git a/tasks/seq/yasakova_t_min_of_vector_elements/src/ops_seq_yasakova.cpp b/tasks/seq/yasakova_t_min_of_vector_elements/src/ops_seq_yasakova.cpp new file mode 100644 index 00000000000..43008c14515 --- /dev/null +++ b/tasks/seq/yasakova_t_min_of_vector_elements/src/ops_seq_yasakova.cpp @@ -0,0 +1,42 @@ +// Copyright 2024 Nesterov Alexander + +#include "seq/yasakova_t_min_of_vector_elements/include/ops_seq_yasakova.hpp" + +#include +#include + +bool yasakova_t_min_of_vector_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res_ = INT_MAX; + return true; +} + +bool yasakova_t_min_of_vector_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1; +} + +bool 
yasakova_t_min_of_vector_elements_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_.size(); i++) {
+    for (size_t j = 0; j < input_[i].size(); j++) {
+      if (input_[i][j] < res_) {
+        res_ = input_[i][j];
+      }
+    }
+  }
+  return true;
+}
+
+bool yasakova_t_min_of_vector_elements_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = res_;
+  return true;
+}
\ No newline at end of file

From 0d1950e9b13a438881fef74d0f733e3f30e0898a Mon Sep 17 00:00:00 2001
From: Kvoks <114129799+Kvoks@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:36:17 +0300
Subject: [PATCH 090/155] Khovansky Dmitry. Task 1. Variant 3. Maximum value
 of vector elements. (#118)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The task of finding the maximum element of a vector is to determine the
largest value among the elements of a one-dimensional vector of
integers. For the algorithm to work correctly, the input data must
contain at least one element, and the result must be represented by a
single number.

Description of the sequential task:
1. Data initialization: the input vector is initialized with the values
   from the input data.
2. Search for the maximum element: the vector is traversed starting
   from the first element. For each element, the current maximum value
   is compared with the current vector element. If the current element
   is larger, it becomes the new maximum value.
3. Result handling: once the traversal of the vector is finished, the
   maximum value is stored and returned in the output data.

Description of the MPI task:
1. Data partitioning: the process with rank 0 splits the input vector
   into blocks, distributing them evenly across all processes. Each
   process receives its own part of the vector to work on.
2. Computation: each process handles its own block of data, searching
   for the maximum value within that block.
3. Result collection: all processes return their partial results (the
   maximum values of their blocks) to process 0. A reduce operation is
   used to combine the results and find the overall maximum value.
4. Result handling: process 0 collects and returns the final maximum
   value as the result of the task.
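The MPI scheme just described maps onto Boost.MPI in the same send/recv-plus-reduce shape used throughout this patch series. The sketch below is a minimal, self-contained illustration, not part of the patch: names and sizes are invented, and, unlike run() in the patch below (which gives every process exactly delta = size / world.size() elements and leaves the remainder unexamined), it also shows one way to cover the tail left over when the vector length is not divisible by the number of processes:

// Standalone sketch of block distribution + reduce(maximum), with the
// leftover tail (size % processes) scanned by rank 0. Illustrative only.
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/operations.hpp>

#include <algorithm>
#include <climits>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> data;
  unsigned int delta = 0;
  if (world.rank() == 0) {
    data.assign(1000003, 1);  // deliberately not divisible by typical process counts
    data.back() = 102;        // plant a known maximum inside the tail
    delta = data.size() / world.size();
  }
  boost::mpi::broadcast(world, delta, 0);

  std::vector<int> local(delta);
  if (world.rank() == 0) {
    for (int proc = 1; proc < world.size(); proc++) {
      world.send(proc, 0, data.data() + proc * delta, static_cast<int>(delta));
    }
    std::copy(data.begin(), data.begin() + delta, local.begin());
    // Keep the tail that even division leaves over; rank 0 scans it too.
    local.insert(local.end(), data.begin() + delta * world.size(), data.end());
  } else {
    world.recv(0, 0, local.data(), static_cast<int>(delta));
  }

  // Local maximum per process, then reduce picks the overall maximum.
  int local_max = local.empty() ? INT_MIN : *std::max_element(local.begin(), local.end());
  int global_max = INT_MIN;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
  // global_max is meaningful only on rank 0, mirroring post_processing()
  // in the patch, which writes the result only on rank 0.
  return 0;
}

Handing the tail to rank 0 keeps the communication pattern unchanged; an alternative would be per-process counts with a scatterv-style collective, which recent Boost.MPI versions also provide.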
--- .../func_tests/main.cpp | 245 +++++++++++++ .../include/ops_mpi.hpp | 45 +++ .../perf_tests/main.cpp | 105 ++++++ .../src/ops_mpi.cpp | 113 ++++++ .../func_tests/main.cpp | 338 ++++++++++++++++++ .../include/ops_seq.hpp | 23 ++ .../perf_tests/main.cpp | 175 +++++++++ .../src/ops_seq.cpp | 51 +++ 8 files changed, 1095 insertions(+) create mode 100644 tasks/mpi/khovansky_d_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/khovansky_d_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/khovansky_d_max_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/khovansky_d_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/khovansky_d_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/khovansky_d_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/khovansky_d_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/khovansky_d_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..1bd6871ba6e --- /dev/null +++ b/tasks/mpi/khovansky_d_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,245 @@ +// Copyright 2024 Khovansky Dmitry +#include + +#include +#include +#include +#include + +#include "mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp" + +std::vector GetRandomVectorForMax(int sz, int left, int right) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(sz); + for (int i = 0; i < sz; i++) { + v[i] = gen() % (1 + right - left) + left; + } + return v; +} + +TEST(khovansky_d_max_of_vector_elements, Test_Max) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + const int left = 0; + const int right = 100; + global_vec = GetRandomVectorForMax(count_size_vector, left, right); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel maxOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(maxOfVectorMPIParalle.validation(), true); + maxOfVectorMPIParalle.pre_processing(); + maxOfVectorMPIParalle.run(); + maxOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential maxOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(maxOfVectorMPISequential.validation(), true); + maxOfVectorMPISequential.pre_processing(); + maxOfVectorMPISequential.run(); + maxOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(khovansky_d_max_of_vector_elements, Test_Max_LargeVector) { + boost::mpi::communicator world; + 
std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + const int left = 0; + const int right = 100; + global_vec = GetRandomVectorForMax(count_size_vector, left, right); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel maxOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(maxOfVectorMPIParalle.validation(), true); + maxOfVectorMPIParalle.pre_processing(); + maxOfVectorMPIParalle.run(); + maxOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential maxOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(maxOfVectorMPISequential.validation(), true); + maxOfVectorMPISequential.pre_processing(); + maxOfVectorMPISequential.run(); + maxOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(khovansky_d_max_of_vector_elements, Test_Max_Negative_Values) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + const int left = -100; + const int right = -1; + global_vec = GetRandomVectorForMax(count_size_vector, left, right); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel maxOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(maxOfVectorMPIParalle.validation(), true); + maxOfVectorMPIParalle.pre_processing(); + maxOfVectorMPIParalle.run(); + maxOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential maxOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(maxOfVectorMPISequential.validation(), true); + maxOfVectorMPISequential.pre_processing(); + maxOfVectorMPISequential.run(); + maxOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(khovansky_d_max_of_vector_elements, Test_Max_RepeatingValues) { + boost::mpi::communicator world; + std::vector global_vec; 
+ std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10}; + global_vec.resize(count_size_vector, 10); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel maxOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(maxOfVectorMPIParalle.validation(), true); + maxOfVectorMPIParalle.pre_processing(); + maxOfVectorMPIParalle.run(); + maxOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential maxOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(maxOfVectorMPISequential.validation(), true); + maxOfVectorMPISequential.pre_processing(); + maxOfVectorMPISequential.run(); + maxOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(khovansky_d_max_of_vector_elements, Test_Max_Empty_Vector) { + boost::mpi::communicator world; + std::vector global_vec = {}; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel maxOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(maxOfVectorMPIParalle.validation(), true); + maxOfVectorMPIParalle.pre_processing(); + maxOfVectorMPIParalle.run(); + maxOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential maxOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(maxOfVectorMPISequential.validation(), true); + maxOfVectorMPISequential.pre_processing(); + maxOfVectorMPISequential.run(); + maxOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..122a12ef662 --- /dev/null +++ b/tasks/mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,45 @@ 
+// Copyright 2024 Khovansky Dmitry +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace khovansky_d_max_of_vector_elements_mpi { + +class MaxOfVectorMPISequential : public ppc::core::Task { + public: + explicit MaxOfVectorMPISequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +class MaxOfVectorMPIParallel : public ppc::core::Task { + public: + explicit MaxOfVectorMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world_; +}; + +} // namespace khovansky_d_max_of_vector_elements_mpi diff --git a/tasks/mpi/khovansky_d_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/khovansky_d_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..51fc8b76b59 --- /dev/null +++ b/tasks/mpi/khovansky_d_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,105 @@ +// Copyright 2024 Khovansky Dmitry +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp" + +std::vector GetRandomVectorForMax(int sz, int left, int right) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(sz); + for (int i = 0; i < sz; i++) { + v[i] = gen() % (1 + right - left) + left; + } + return v; +} + +TEST(khovansky_d_max_of_vector_elements_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + const int left = 0; + const int right = 100; + global_vec = GetRandomVectorForMax(count_size_vector, left, right); + global_vec[0] = 102; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MaxOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MaxOfVectorMPIParallel->validation(), true); + MaxOfVectorMPIParallel->pre_processing(); + MaxOfVectorMPIParallel->run(); + MaxOfVectorMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MaxOfVectorMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(102, global_res[0]); + } +} + +TEST(khovansky_d_max_of_vector_elements_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + 
count_size_vector = 10000000; + const int left = 0; + const int right = 100; + global_vec = GetRandomVectorForMax(count_size_vector, left, right); + global_vec[0] = 102; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MaxOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MaxOfVectorMPIParallel->validation(), true); + MaxOfVectorMPIParallel->pre_processing(); + MaxOfVectorMPIParallel->run(); + MaxOfVectorMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(MaxOfVectorMPIParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(102, global_res[0]); + } +} diff --git a/tasks/mpi/khovansky_d_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/khovansky_d_max_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..7e7594f80e9 --- /dev/null +++ b/tasks/mpi/khovansky_d_max_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,113 @@ +// Copyright 2024 Khovansky Dmitry +#include "mpi/khovansky_d_max_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +int VectorMax(std::vector> r) { + if (r.empty()) { + return 0; + } + + int max = r[0]; + for (size_t i = 1; i < r.size(); i++) { + if (r[i] > max) { + max = r[i]; + } + } + return max; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential::run() { + internal_order_test(); + if (input_.empty()) { + // Handle the case when the input vector is empty + return true; + } + res = VectorMax(input_); + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPISequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel::pre_processing() { + internal_order_test(); + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel::validation() { + internal_order_test(); + if (world_.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world_.rank() == 0) { + delta = taskData->inputs_count[0] / world_.size(); + } + broadcast(world_, delta, 0); + + if (world_.rank() == 0) { + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = 
reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + for (int proc = 1; proc < world_.size(); proc++) { + world_.send(proc, 0, input_.data() + proc * delta, delta); + } + } + local_input_ = std::vector(delta); + if (world_.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + world_.recv(0, 0, local_input_.data(), delta); + } + if (local_input_.empty()) { + // Handle the case when the local input vector is empty + return true; + } + int max = VectorMax(local_input_); + + reduce(world_, max, res_, boost::mpi::maximum(), 0); + return true; +} + +bool khovansky_d_max_of_vector_elements_mpi::MaxOfVectorMPIParallel::post_processing() { + internal_order_test(); + if (world_.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/khovansky_d_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/khovansky_d_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..e3049a3e21c --- /dev/null +++ b/tasks/seq/khovansky_d_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,338 @@ +// Copyright 2024 Khovansky Dmitry +#include + +#include +#include + +#include "seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVectorForMax(int sz, int left, int right) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(sz); + for (int i = 0; i < sz; i++) { + v[i] = gen() % (1 + right - left) + left; + } + return v; +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_10_Positive_Numbers) { + const int count = 10; + const int left = 0; + const int right = 1000; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_10_Negative_Numbers) { + const int count = 10; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_20_Positive_Numbers) { + const int count = 20; + const int left = 0; + const int right = 1000; + // Create data + std::vector in 
= GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_20_Negative_Numbers) { + const int count = 20; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_50_Positive_Numbers) { + const int count = 50; + const int left = 0; + const int right = 100; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_50_Negative_Numbers) { + const int count = 50; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_70_Positive_Numbers) { + const int count = 70; + const int left = 0; + const int 
right = 100; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_70_Negative_Numbers) { + const int count = 70; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_100_Positive_Numbers) { + const int count = 100; + const int left = 0; + const int right = 100; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_100_Negative_Numbers) { + const int count = 100; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_1000_Positive_Numbers) { + const 
int count = 1000; + const int left = 0; + const int right = 100; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_1000_Negative_Numbers) { + const int count = 1000; + const int left = -1000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + int ex = *std::max_element(in.begin(), in.end()); + ASSERT_EQ(ex, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements_seq, Test_Max_Empty_Vector) { + // Create data + std::vector in = {}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq MaxOfVectorSeq(taskDataSeq); + ASSERT_EQ(MaxOfVectorSeq.validation(), true); + MaxOfVectorSeq.pre_processing(); + MaxOfVectorSeq.run(); + MaxOfVectorSeq.post_processing(); + ASSERT_EQ(0, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..61cb18c1b97 --- /dev/null +++ b/tasks/seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Copyright 2024 Khovansky Dmitry +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace khovansky_d_max_of_vector_elements_seq { + +class MaxOfVectorSeq : public ppc::core::Task { + public: + explicit MaxOfVectorSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; +} // namespace khovansky_d_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/khovansky_d_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/khovansky_d_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..bf0a4acf4d3 --- /dev/null 
+++ b/tasks/seq/khovansky_d_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,175 @@ +// Copyright 2024 Khovansky Dmitry +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVectorForMax(int sz, int left, int right) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(sz); + for (int i = 0; i < sz; i++) { + v[i] = gen() % (1 + right - left) + left; + } + return v; +} + +TEST(khovansky_d_max_of_vector_elements, test_max_of_vector_with_positive_numbers) { + const int count = 10000; + const int left = 0; + const int right = 1000000; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + in[0] = 1000002; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MaxOfVectorSeq = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MaxOfVectorSeq); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1000002, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements, test_max_of_vector_with_positive_numbers_long) { + const int count = 10000000; + const int left = 0; + const int right = 1000000; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + in[0] = 1000002; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MaxOfVectorSeq = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MaxOfVectorSeq); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1000002, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements, test_max_of_vector_with_negative_numbers) { + const int count = 10000; + const int left = -1000000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + in[0] = 0; + std::vector out(1, 0); + + // 
Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MaxOfVectorSeq = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MaxOfVectorSeq); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(khovansky_d_max_of_vector_elements, test_max_of_vector_with_negative_numbers_long) { + const int count = 10000000; + const int left = -1000000; + const int right = -1; + // Create data + std::vector in = GetRandomVectorForMax(count, left, right); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MaxOfVectorSeq = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MaxOfVectorSeq); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/khovansky_d_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/khovansky_d_max_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..9ded4b6bb1b --- /dev/null +++ b/tasks/seq/khovansky_d_max_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,51 @@ +// Copyright 2024 Khovansky Dmitry +#include "seq/khovansky_d_max_of_vector_elements/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +int VectorMax(std::vector> r) { + if (r.empty()) { + return 0; + } + + int max = r[0]; + for (size_t i = 1; i < r.size(); i++) { + if (r[i] > max) { + max = r[i]; + } + } + + return max; +} + +bool khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], input_.begin()); + + return true; +} + +bool khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq::validation() { + internal_order_test(); + // 
Check count elements of output
+  return taskData->outputs_count[0] == 1;
+}
+
+bool khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq::run() {
+  internal_order_test();
+  res = VectorMax(input_);
+  return true;
+}
+
+bool khovansky_d_max_of_vector_elements_seq::MaxOfVectorSeq::post_processing() {
+  internal_order_test();
+  reinterpret_cast(taskData->outputs[0])[0] = res;
+  return true;
+}

From d82701482c9629bb648cb42305c1e289592a821a Mon Sep 17 00:00:00 2001
From: Grudzin Konstantin <113104424+Konstantin-Grudzin@users.noreply.github.com>
Date: Mon, 4 Nov 2024 05:36:48 +0300
Subject: [PATCH 091/155] =?UTF-8?q?=D0=93=D1=80=D1=83=D0=B4=D0=B7=D0=B8?=
 =?UTF-8?q?=D0=BD=20=D0=9A=D0=BE=D0=BD=D1=81=D1=82=D0=B0=D0=BD=D1=82=D0=B8?=
 =?UTF-8?q?=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?=
 =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=207.=20=D0=9D=D0=B0=D1=85?=
 =?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8?=
 =?UTF-8?q?=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5=20=D0=B1=D0=BB=D0=B8=D0=B7=D0=BA?=
 =?UTF-8?q?=D0=B8=D1=85=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8?=
 =?UTF-8?q?=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?=
 =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#126)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Core idea:** Represent the answer as a pair of numbers: the value and its index. If we take the minimum according to the standard comparison of pairs, we get the leftmost index holding the minimal value.

**Sequential implementation:** We walk through the array and find the minimal element among the pairs of "absolute difference of adjacent elements" and index.

**MPI implementation:** We split the array into several equal parts and hand each process its own part. Each process computes its minimum and passes it to the root process via a reduce operation. Since we also examine the next element, each process is given one extra element. To avoid reading past the end of the array, and to compute the answer correctly, each process is passed its starting offset and the size of the valid data.
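As an illustration of the pair trick described above (a minimal sketch under these conventions, not the code of this patch; `closest_pair_index` is a hypothetical name):

```cpp
#include <algorithm>
#include <climits>
#include <cstdlib>
#include <utility>
#include <vector>

// Returns the leftmost index i that minimizes |v[i] - v[i + 1]|.
// Lexicographic comparison of {difference, index} pairs picks the
// smallest difference and, on ties, the smallest index automatically.
int closest_pair_index(const std::vector<int>& v) {
  std::pair<int, int> best = {INT_MAX, -1};
  for (size_t i = 0; i + 1 < v.size(); ++i) {
    std::pair<int, int> cand = {std::abs(v[i] - v[i + 1]), static_cast<int>(i)};
    best = std::min(best, cand);
  }
  return best.second;
}
```

With the one-element overlap described above, each process can also evaluate the pair that straddles its segment boundary, and reducing the per-process pairs with `boost::mpi::minimum<std::pair<int, int>>` preserves the leftmost-index tie-breaking.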
--- .../func_tests/main.cpp | 309 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 +++ .../perf_tests/main.cpp | 90 +++++ .../src/ops_mpi.cpp | 101 ++++++ .../func_tests/main.cpp | 133 ++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 85 +++++ .../src/ops_seq.cpp | 36 ++ 8 files changed, 826 insertions(+) create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..880dd20042f --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,309 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" + +namespace grudzin_k_nearest_neighbor_elements_mpi { + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = -100 + gen() % 201; + } + return vec; +} + +} // namespace grudzin_k_nearest_neighbor_elements_mpi + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Wrong_Test) { + boost::mpi::communicator world; + std::vector global_vec(1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_10k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // 
Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_1k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_2k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 2000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
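// Rank 0 re-runs the computation sequentially to obtain a reference answer. +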
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_4k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 4000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3k) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 3000; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + 
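// The reference task reads the same input buffer; only the output destination differs. +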
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, Test_3) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, INT_MAX); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 3; + global_vec = grudzin_k_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_ans[0], global_ans[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..7591263e048 --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace grudzin_k_nearest_neighbor_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::pair res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::pair res; + size_t size; + size_t start; + boost::mpi::communicator world; +}; + +} // namespace 
grudzin_k_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..342f90c6b7c --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_ans(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 5000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); + taskDataPar->outputs_count.emplace_back(global_ans.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_ans[0]); + } +} + +TEST(grudzin_k_nearest_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 50000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} diff --git a/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 
00000000000..f60565363c7 --- /dev/null +++ b/tasks/mpi/grudzin_k_nearest_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,101 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/grudzin_k_nearest_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; ++i) { + std::pair tmp = {abs(input_[i] - input_[i + 1]), i}; + res = std::min(res, tmp); + } + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res.second; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = (taskData->inputs_count[0]) / world.size(); + size = taskData->inputs_count[0]; + if (taskData->inputs_count[0] % world.size() > 0u) delta++; + } + broadcast(world, delta, 0); + broadcast(world, size, 0); + + if (world.rank() == 0) { + // Init vectors + input_ = std::vector(world.size() * delta + 2, 0); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta + 1); + } + } + + local_input_ = std::vector(delta + 1); + start = world.rank() * delta; + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta + 1); + } else { + world.recv(0, 0, local_input_.data(), delta + 1); + } + + std::pair local_ans_ = {INT_MAX, -1}; + for (size_t i = 0; i < local_input_.size() - 1 && (i + start) < size - 1; ++i) { + std::pair tmp = {abs(local_input_[i] - local_input_[i + 1]), i + start}; + local_ans_ = std::min(local_ans_, tmp); + } + reduce(world, local_ans_, res, boost::mpi::minimum>(), 0); + return true; +} + +bool grudzin_k_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res.second; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..dfd38c04fa4 --- /dev/null +++ 
b/tasks/seq/grudzin_k_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,133 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_seq, Wrong_Test) { + std::vector in = {2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Lazy) { + // Create data + std::vector in = {2, 3}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_24) { + // Create data + std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_40) { + // Create data + std::vector in = {2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12, + 2, 3, 4, 1, 7, 3, 2, 9, -15, 3, -1, 5, 8, 5, 12, 9, 24, 12}; + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_60) { + // Create data + std::vector in(100, 0); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + 
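// All elements are equal, so every adjacent difference is 0 and the leftmost index 0 wins. +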
taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, Test_Close_Negative) { + // Create data + std::vector in = {-1, -3, -5, -4, -2}; + std::vector out(1, 0); + int ans = 2; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..9f91d411f6d --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace grudzin_k_nearest_neighbor_elements_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_{}; + std::pair res{}; +}; + +} // namespace grudzin_k_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..556e2672056 --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(grudzin_k_nearest_neighbor_elements_seq, test_pipeline_run) { + int size = 10000000; + // Create data + std::vector in(size); + std::vector out(1, 0); + int ans = 0; + for (int i = 0; i < size; ++i) { + in[i] = 3 * i; + } + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = 
std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} + +TEST(grudzin_k_nearest_neighbor_elements_seq, test_task_run) { + int size = 10000000; + // Create data + std::vector in(size); + std::vector out(1, 0); + int ans = 0; + for (int i = 0; i < size; ++i) { + in[i] = 2 * i; + } + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} diff --git a/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..89d66a1ddf4 --- /dev/null +++ b/tasks/seq/grudzin_k_nearest_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,36 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/grudzin_k_nearest_neighbor_elements/include/ops_seq.hpp" + +#include + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = {INT_MAX, -1}; + return true; +} + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; i++) { + std::pair tmp = {abs(input_[i] - input_[i + 1]), i}; + res = std::min(res, tmp); + } + return true; +} + +bool grudzin_k_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res.second; + return true; +} From 7021ad16f37f89af9ad95943be963fbcd0838e42 Mon Sep 17 00:00:00 2001 From: MatveyKurakin <113084585+MatveyKurakin@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:38:04 +0300 Subject: [PATCH 092/155] =?UTF-8?q?=D0=9A=D1=83=D1=80=D0=B0=D0=BA=D0=B8?= =?UTF-8?q?=D0=BD=20=D0=9C=D0=B0=D1=82=D0=B2=D0=B5=D0=B9.=20=D0=97=D0=B0?= 
=?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?=
 =?UTF-8?q?=D0=BD=D1=82=2017.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0=D0=BB?=
 =?UTF-8?q?=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D1=80=D0=BE=D0=BA?=
 =?UTF-8?q?=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#?=
 =?UTF-8?q?127)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential algorithm:
- Walk over every element of each row and record that row's minimum.

Description of the MPI algorithm:
- Process 0 distributes the data among all processes: it splits the matrix, stored as a flat vector, into parts, one per process.
- In run(), each process determines the row boundaries on its own and finds the row minimums. If a row is split across several processes, each process finds the minimum on its own segment, and the result is then collected on process 0 via reduce.
---
 .../func_tests/main.cpp | 555 ++++++++++++++++++
 .../include/ops_mpi.hpp | 51 ++
 .../perf_tests/main.cpp | 104 ++++
 .../src/ops_mpi.cpp | 141 +++++
 .../func_tests/main.cpp | 131 +++++
 .../include/ops_seq.hpp | 26 +
 .../perf_tests/main.cpp | 100 ++++
 .../src/ops_seq.cpp | 46 ++
 8 files changed, 1154 insertions(+)
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..493bfbf1ff4
--- /dev/null
+++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
@@ -0,0 +1,555 @@
+// Copyright 2023 Nesterov Alexander
+#include
+
+#include
+#include
+#include
+
+#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
+
+TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_1_1) {
+  int count_rows = 1;
+  int size_rows = 1;
+  boost::mpi::communicator world;
+  std::vector global_mat;
+  std::vector ans;
+  std::vector par_min_vec(count_rows, 0);
+
+  // Create TaskData
+  std::shared_ptr taskDataPar = std::make_shared();
+
+  if (world.rank() == 0) {
+    global_mat = {5};
+    ans = {5};
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data()));
+    taskDataPar->inputs_count.emplace_back(global_mat.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast(1));
+    taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast(1));
+    taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data()));
+    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
+  }
+
+  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+ 
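// validation() is expected to succeed on every rank before the pipeline stages run. +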
ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_5_1) { + int count_rows = 5; + int size_rows = 1; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {5, 5, 5, 5, 5}; + ans = {5, 5, 5, 5, 5}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_1_5) { + int count_rows = 1; + int size_rows = 5; 
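+  // 1 x 5 case: with more than one process the single row is split across
+  // ranks, exercising the partial-minimum reduce path.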
+ boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {5, 5, 5, 5, 5}; + ans = {5}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_5) { + int count_rows = 3; + int size_rows = 5; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0}; + ans = {1, 3, 0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + 
taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_6) { + int count_rows = 3; + int size_rows = 6; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; + ans = {3, 4, 2}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_4_5) { + int count_rows = 4; + int size_rows = 5; + boost::mpi::communicator world; + std::vector global_mat; + std::vector ans; + std::vector par_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; + ans = {3, 4, 2, 5}; + 
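+    // Expected per-row minima of the 4 x 5 matrix above:
+    // min{10,5,3,9,7} = 3, min{9,13,4,6,7} = 4, min{7,9,12,4,2} = 2, min{5,10,9,5,8} = 5.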
taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, ans); + ASSERT_EQ(par_min_vec, ans); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_12) { + int count_rows = 10; + int size_rows = 12; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + 
taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_15) { + int count_rows = 10; + int size_rows = 15; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_2) { + int count_rows = 10; + int size_rows = 2; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + 
taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(ref_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_min_vec, par_min_vec); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_0_0) { + int count_rows = 0; + int size_rows = 0; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Check_valdation) { + int count_rows = 10; + int size_rows = 10; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + + kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp 
b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..f1eb1dc4e09 --- /dev/null +++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kurakin_m_min_values_by_rows_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + std::vector input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + int delta_proc{}; + std::vector input_, local_input_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace kurakin_m_min_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..a290fa90e1a --- /dev/null +++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,104 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_pipeline_run) { + int count_rows = 100; + int size_rows = 400; + boost::mpi::communicator world; + std::vector global_mat; + std::vector par_min_vec(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_mat = std::vector(count_rows * size_rows, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->inputs_count.emplace_back(global_mat.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataPar->inputs_count.emplace_back(static_cast(1)); + taskDataPar->outputs.emplace_back(reinterpret_cast(par_min_vec.data())); + taskDataPar->outputs_count.emplace_back(par_min_vec.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i 
< par_min_vec.size(); i++) {
+      EXPECT_EQ(1, par_min_vec[i]);
+    }
+  }
+}
+
+TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_task_run) {
+  int count_rows = 100;
+  int size_rows = 400;
+  boost::mpi::communicator world;
+  std::vector<int> global_mat;
+  std::vector<int> par_min_vec(count_rows, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_mat = std::vector<int>(count_rows * size_rows, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
+    taskDataPar->inputs_count.emplace_back(global_mat.size());
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
+    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
+    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    for (unsigned i = 0; i < par_min_vec.size(); i++) {
+      EXPECT_EQ(1, par_min_vec[i]);
+    }
+  }
+}
diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..cc2079ef09e
--- /dev/null
+++ b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
@@ -0,0 +1,141 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+
+#include <functional>
+#include <random>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+std::vector<int> kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+
+  count_rows = (int)*taskData->inputs[1];
+  size_rows = (int)*taskData->inputs[2];
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+  res = std::vector<int>(count_rows, 0);
+  return true;
+}
+
+bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+
+  return taskData->inputs.size() == 3 && taskData->inputs_count.size() == 3 && taskData->outputs.size() == 1 &&
+         taskData->outputs_count.size() == 1 && *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 &&
+         *taskData->inputs[1] == taskData->outputs_count[0];
+}
+
+bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+
+  for (int i = 0; i < count_rows; i++) {
+    res[i] =
*std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + count_rows = (int)*taskData->inputs[1]; + size_rows = (int)*taskData->inputs[2]; + if (taskData->inputs_count[0] % world.size() == 0) { + delta_proc = taskData->inputs_count[0] / world.size(); + } else { + delta_proc = taskData->inputs_count[0] / world.size() + 1; + } + input_ = std::vector(delta_proc * world.size(), INT_MAX); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs.size() == 3 && taskData->inputs_count.size() == 3 && taskData->outputs.size() == 1 && + taskData->outputs_count.size() == 1 && *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 && + *taskData->inputs[1] == taskData->outputs_count[0]; + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + broadcast(world, count_rows, 0); + broadcast(world, size_rows, 0); + broadcast(world, delta_proc, 0); + + local_input_ = std::vector(delta_proc); + boost::mpi::scatter(world, input_.data(), local_input_.data(), delta_proc, 0); + + res = std::vector(count_rows, INT_MAX); + + unsigned int last_delta = 0; + if (world.rank() == world.size() - 1) { + last_delta = local_input_.size() * world.size() - size_rows * count_rows; + } + + unsigned int ind = std::min(world.rank() * local_input_.size() / size_rows, static_cast(count_rows - 1)); + + unsigned int delta = std::min(local_input_.size(), size_rows - world.rank() * local_input_.size() % size_rows); + std::vector local_res(count_rows, INT_MAX); + + local_res[ind] = *std::min_element(local_input_.begin(), local_input_.begin() + delta); + ++ind; + + unsigned int k = 0; + while (local_input_.begin() + delta + k * size_rows < local_input_.end() - last_delta) { + local_res[ind] = + *std::min_element(local_input_.begin() + delta + k * size_rows, + std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * size_rows)); + ++k; + ++ind; + } + + for (unsigned int i = 0; i < res.size(); ++i) { + reduce(world, local_res[i], res[i], boost::mpi::minimum(), 0); + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..480eccc6ab7 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,131 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min1) { + int count_rows; + int 
size_rows; + + // Create data + count_rows = 3; + size_rows = 5; + std::vector global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {1, 3, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); +} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min2) { + int count_rows; + int size_rows; + + // Create data + count_rows = 3; + size_rows = 6; + std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {3, 4, 2}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); +} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min3) { + int count_rows; + int size_rows; + + // Create data + count_rows = 4; + size_rows = 5; + + std::vector global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8}; + + std::vector seq_min_vec(count_rows, 0); + std::vector ans = {3, 4, 2, 5}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, seq_min_vec); 
+} + +TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min_null) { + int count_rows; + int size_rows; + // Create data + count_rows = 0; + size_rows = 0; + std::vector global_mat(count_rows * size_rows); + std::vector seq_min_vec(count_rows, 0); + std::vector ans(count_rows, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + // Create Task + kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(seq_min_vec, ans); +} \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..6c4d04360c4 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kurakin_m_min_values_by_rows_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int count_rows{}; + int size_rows{}; + std::vector input_; + std::vector res; +}; + +} // namespace kurakin_m_min_values_by_rows_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b07bae29d92 --- /dev/null +++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(kurakin_m_min_values_by_rows_matrix_seq, test_pipeline_run) { + int count_rows; + int size_rows; + + // Create data + count_rows = 100; + size_rows = 400; + std::vector global_mat(count_rows * size_rows, 1); + std::vector seq_min_vec(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); + taskDataSeq->inputs_count.emplace_back(static_cast(1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + 
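+  // Note: the timer lambda below reports seconds elapsed since t0 using
+  // std::chrono; num_running controls how many timed repetitions are made.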
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  for (size_t i = 0; i < seq_min_vec.size(); i++) {
+    EXPECT_EQ(1, seq_min_vec[i]);
+  }
+}
+
+TEST(kurakin_m_min_values_by_rows_matrix_seq, test_task_run) {
+  int count_rows;
+  int size_rows;
+
+  // Create data
+  count_rows = 100;
+  size_rows = 400;
+  std::vector<int> global_mat(count_rows * size_rows, 1);
+  std::vector<int> seq_min_vec(count_rows, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
+  taskDataSeq->inputs_count.emplace_back(global_mat.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
+  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
+  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
+  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
+
+  // Create Task
+  auto testTaskSequential = std::make_shared<kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  for (unsigned i = 0; i < seq_min_vec.size(); i++) {
+    EXPECT_EQ(1, seq_min_vec[i]);
+  }
+}
diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp
new file mode 100644
index 00000000000..5efb2be011d
--- /dev/null
+++ b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp
@@ -0,0 +1,46 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+  return true;
+}
+
+bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return *taskData->inputs[1] == taskData->outputs_count[0];
+}
+
+bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::run() {
+  internal_order_test();
+  //
Init value for output + count_rows = (int)*taskData->inputs[1]; + size_rows = (int)*taskData->inputs[2]; + res = std::vector(count_rows, 0); + + for (int i = 0; i < count_rows; i++) { + res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} From f2bb1896d4999026d4627f90b3016e8f5168ebf7 Mon Sep 17 00:00:00 2001 From: ascannel <113050263+ascannel@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:38:58 +0300 Subject: [PATCH 093/155] =?UTF-8?q?=D0=9B=D0=BE=D0=BF=D0=B0=D1=82=D0=B8?= =?UTF-8?q?=D0=BD=20=D0=98=D0=BB=D1=8C=D1=8F.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=2024.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D1=91=D1=82=20=D1=87?= =?UTF-8?q?=D0=B8=D1=81=D0=BB=D0=B0=20=D1=81=D0=BB=D0=BE=D0=B2=20=D0=B2=20?= =?UTF-8?q?=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.=20(#128)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit performance tests fixes --- .../func_tests/countWordsFuncTests.cpp | 224 ++++++++++++++++++ .../include/countWordsMPIHeader.hpp | 48 ++++ .../perf_tests/countWordsPerfTests.cpp | 72 ++++++ .../src/countWordsMPI.cpp | 123 ++++++++++ .../func_tests/countWordsFuncTests.cpp | 120 ++++++++++ .../include/countWordsSeqHeader.hpp | 26 ++ .../perf_tests/countWordsPerfTests.cpp | 66 ++++++ .../src/countWordsSeq.cpp | 65 +++++ 8 files changed, 744 insertions(+) create mode 100644 tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp create mode 100644 tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp create mode 100644 tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp create mode 100644 tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp create mode 100644 tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp create mode 100644 tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp create mode 100644 tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp create mode 100644 tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp diff --git a/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp new file mode 100644 index 00000000000..cd794521543 --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp @@ -0,0 +1,224 @@ +#include + +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +TEST(lopatin_i_count_words_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector input = {}; + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_FALSE(testTaskParallel.validation()); + } +} + +TEST(lopatin_i_count_words_mpi, test_3_chars) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "sym"; + for (unsigned long 
int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_3_words) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "three funny words"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_300_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(20); + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + 
taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_1500_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(100); + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} + +TEST(lopatin_i_count_words_mpi, test_6k_words) { + boost::mpi::communicator world; + std::vector input = lopatin_i_count_words_mpi::generateLongString(400); + std::vector wordCount(1, 0); + + std::shared_ptr taskDataParallel = std::make_shared(); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataParallel->inputs_count.emplace_back(input.size()); + taskDataParallel->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataParallel->outputs_count.emplace_back(wordCount.size()); + } + + lopatin_i_count_words_mpi::TestMPITaskParallel testTaskParallel(taskDataParallel); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + 
testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector referenceWordCount(1, 0); + std::shared_ptr taskDataSequential = std::make_shared(); + + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordCount.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordCount.size()); + + lopatin_i_count_words_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], referenceWordCount[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp b/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp new file mode 100644 index 00000000000..43056f5ff4a --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lopatin_i_count_words_mpi { + +std::vector generateLongString(int n); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int wordCount{}; + int spaceCount{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector localInput_; + int wordCount{}; + int spaceCount{}; + int localSpaceCount{}; + int chunkSize{}; + boost::mpi::communicator world; +}; + +} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp new file mode 100644 index 00000000000..cb9e8d2701d --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp @@ -0,0 +1,72 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +std::vector testData = lopatin_i_count_words_mpi::generateLongString(2000); + +TEST(lopatin_i_count_words_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector input = testData; + std::vector wordCount(1, 0); + + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskData->outputs_count.emplace_back(wordCount.size()); + } + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + 
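+  // generateLongString(2000) yields 2000 sentences of 15 words each, so the
+  // expected word count checked below is 30000.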
perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordCount[0], 30000); + } +} + +TEST(lopatin_i_count_words_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector input = testData; + std::vector wordCount(1, 0); + + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskData->outputs_count.emplace_back(wordCount.size()); + } + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordCount[0], 30000); + } +} \ No newline at end of file diff --git a/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp b/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp new file mode 100644 index 00000000000..b02d5d47faf --- /dev/null +++ b/tasks/mpi/lopatin_i_count_words/src/countWordsMPI.cpp @@ -0,0 +1,123 @@ +#include "mpi/lopatin_i_count_words/include/countWordsMPIHeader.hpp" + +namespace lopatin_i_count_words_mpi { + +std::vector generateLongString(int n) { + std::vector testData; + std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. "; + for (int i = 0; i < n - 1; i++) { + for (unsigned long int j = 0; j < testString.length(); j++) { + testData.push_back(testString[j]); + } + } + std::string lastSentence = "This is a long sentence for performance testing of the word count algorithm using MPI."; + for (unsigned long int j = 0; j < lastSentence.length(); j++) { + testData.push_back(lastSentence[j]); + } + return testData; +} + +bool TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + return true; +} + +bool TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool TestMPITaskSequential::run() { + internal_order_test(); + for (char c : input_) { + if (c == ' ') { + spaceCount++; + } + } + wordCount = spaceCount + 1; + return true; +} + +bool TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = wordCount; + return true; +} + +bool TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmpPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned long int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmpPtr[i]; + } + } + return true; +} + +bool TestMPITaskParallel::validation() { + internal_order_test(); + return (world.rank() == 0) ? 
(taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1) : true; +} + +bool TestMPITaskParallel::run() { + internal_order_test(); + + unsigned long totalSize = 0; + if (world.rank() == 0) { + totalSize = input_.size(); + // round the chunk size up so the chunks cover the whole string and no tail characters are dropped + chunkSize = (taskData->inputs_count[0] + world.size() - 1) / world.size(); + } + boost::mpi::broadcast(world, chunkSize, 0); + boost::mpi::broadcast(world, totalSize, 0); + + unsigned long startPos = world.rank() * chunkSize; + // clamp the chunk to the string end; for small inputs startPos may lie past the end + unsigned long actualChunkSize = 0; + if (startPos < totalSize) { + actualChunkSize = (startPos + chunkSize <= totalSize) ? chunkSize : (totalSize - startPos); + } + + localInput_.resize(actualChunkSize); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + unsigned long procStartPos = proc * chunkSize; + unsigned long procChunkSize = 0; + if (procStartPos < totalSize) { + procChunkSize = (procStartPos + chunkSize <= totalSize) ? chunkSize : (totalSize - procStartPos); + } + if (procChunkSize > 0) { + world.send(proc, 0, input_.data() + procStartPos, procChunkSize); + } + } + localInput_.assign(input_.begin(), input_.begin() + actualChunkSize); + } else { + if (actualChunkSize > 0) { + world.recv(0, 0, localInput_.data(), actualChunkSize); + } + } + + localSpaceCount = 0; + for (size_t i = 0; i < localInput_.size(); ++i) { + if (localInput_[i] == ' ') { + localSpaceCount++; + } + } + + boost::mpi::reduce(world, localSpaceCount, spaceCount, std::plus<>(), 0); + + if (world.rank() == 0) { + wordCount = spaceCount + 1; + } + return true; +} + +bool TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = wordCount; + } + return true; +} + +} // namespace lopatin_i_count_words_mpi \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp b/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp new file mode 100644 index 00000000000..b01df8d6841 --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/func_tests/countWordsFuncTests.cpp @@ -0,0 +1,120 @@ +#include + +#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" + +TEST(lopatin_i_count_words_seq, test_empty_string) { + std::vector input = {}; + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), false); +} + +TEST(lopatin_i_count_words_seq, test_1_word) { + std::vector input; + std::string testString = "sym"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 1); +} + +TEST(lopatin_i_count_words_seq, test_3_words) { + std::vector input; + std::string testString = "three funny words"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector out(1, 0); + + std::shared_ptr 
taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 3); +} + +TEST(lopatin_i_count_words_seq, test_300_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(20); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 300); +} + +TEST(lopatin_i_count_words_seq, test_1500_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(100); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 1500); +} + +TEST(lopatin_i_count_words_seq, test_6k_words) { + std::vector input = lopatin_i_count_words_seq::generateLongString(400); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + lopatin_i_count_words_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 6000); +} \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp b/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp new file mode 100644 index 00000000000..2e3bd3560bc --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lopatin_i_count_words_seq { +std::vector generateLongString(int n); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int wordCount{}; +}; + +} // namespace lopatin_i_count_words_seq \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp b/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp new file mode 100644 index 
00000000000..2eb3819f497 --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/perf_tests/countWordsPerfTests.cpp @@ -0,0 +1,66 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" + +std::vector testData = lopatin_i_count_words_seq::generateLongString(2000); + +TEST(lopatin_i_count_words_seq, test_pipeline_run) { + std::vector input = testData; + std::vector word_count(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + taskData->outputs_count.emplace_back(word_count.size()); + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(word_count[0], 30000); +} + +TEST(lopatin_i_count_words_seq, test_task_run) { + std::vector input = testData; + std::vector word_count(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + taskData->outputs_count.emplace_back(word_count.size()); + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(word_count[0], 30000); +} \ No newline at end of file diff --git a/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp b/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp new file mode 100644 index 00000000000..20ed6fc707a --- /dev/null +++ b/tasks/seq/lopatin_i_count_words/src/countWordsSeq.cpp @@ -0,0 +1,65 @@ +#include "seq/lopatin_i_count_words/include/countWordsSeqHeader.hpp" + +namespace lopatin_i_count_words_seq { + +std::vector generateLongString(int n) { + std::vector testData; + std::string testString = "This is a long sentence for performance testing of the word count algorithm using MPI. 
"; + for (int i = 0; i < n - 1; i++) { + for (unsigned long int j = 0; j < testString.length(); j++) { + testData.push_back(testString[j]); + } + } + std::string lastSentence = "This is a long sentence for performance testing of the word count algorithm using MPI."; + for (unsigned long int j = 0; j < lastSentence.length(); j++) { + testData.push_back(lastSentence[j]); + } + return testData; +} + +bool lopatin_i_count_words_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + return true; +} + +bool lopatin_i_count_words_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool lopatin_i_count_words_seq::TestTaskSequential::run() { + internal_order_test(); + + wordCount = 0; + bool inWord = false; + + for (size_t i = 0; i < input_.size(); ++i) { + if (input_[i] == ' ') { + if (inWord) { + wordCount++; + inWord = false; + } + } else if (!inWord) { + inWord = true; + } + } + + if (inWord) { + wordCount++; + } + + return true; +} + +bool lopatin_i_count_words_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = wordCount; + return true; +} + +} // namespace lopatin_i_count_words_seq \ No newline at end of file From 48d0c5ee120acf0621c562b15da1ba364bd62380 Mon Sep 17 00:00:00 2001 From: odincovm <112872592+odincovm@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:39:24 +0300 Subject: [PATCH 094/155] =?UTF-8?q?=D0=9E=D0=B4=D0=B8=D0=BD=D1=86=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2027=20=D0=9F=D0=BE=D0=B4=D1=81?= =?UTF-8?q?=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D0=BD?= =?UTF-8?q?=D0=B5=D1=81=D0=BE=D0=B2=D0=BF=D0=B0=D0=B4=D0=B0=D1=8E=D1=89?= =?UTF-8?q?=D0=B8=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=20?= =?UTF-8?q?(#131)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Последовательная версия: мы сортируем строки, так чтобы если длины не равны то длина 1 была больше, чем вторая. Затем проходимся по циклу и на каждой итерации сравниваем символы, если различны то к ответу прибавляем 2, если 2 строка закончилась мы идем дальше по 1 строке на каждой итерации прибавляя по единице В параллельной версии алгоритм меняется тем, что мы уравниваем длины строк и разницу прибавляем к ответу. Затем разбиваем строки на несколько частей и по частям отдаем их потокам и проходим тот же алгоритм, что и а последовательной версии --------- Co-authored-by: Rerard Co-authored-by: Michael K. 
<130953568+kmichaelk@users.noreply.github.com> --- .../func_tests/main.cpp | 263 ++++++++++++++++++ .../include/ops_mpi.hpp | 40 +++ .../perf_tests/main.cpp | 90 ++++++ .../src/ops_mpi.cpp | 137 +++++++++ .../func_tests/main.cpp | 108 +++++++ .../include/ops_seq.hpp | 23 ++ .../perf_tests/main.cpp | 88 ++++++ .../src/ops_seq.cpp | 48 ++++ 8 files changed, 797 insertions(+) create mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp create mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp create mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp create mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp create mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp create mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp create mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp create mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp new file mode 100644 index 00000000000..d091ad4dd05 --- /dev/null +++ b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp @@ -0,0 +1,263 @@ + +#include + +#include +#include +#include + +#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" + +std::string get_random_str(size_t sz) { + const char characters[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrswxyz0123456789"; + std::string str; + + std::srand(std::time(nullptr)); + + for (size_t i = 0; i < sz; ++i) { + // Generate a random index into the alphabet + int index = std::rand() % (sizeof(characters) - 1); + str += characters[index]; // append, so the result really has sz characters + } + + return str; +} +TEST(Parallel_MPI_count, sz_0) { + // Create data// + boost::mpi::communicator com; + char str1[] = ""; + char str2[] = ""; + std::vector in{str1, str2}; + std::vector out(1, 1); + std::vector out_s(1, 1); + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} + +TEST(Parallel_MPI_count, sz_1) { + // Create data// + boost::mpi::communicator com; + std::string s1 = 
get_random_str(1); + std::string s2 = get_random_str(1); + std::vector in{s1.data(), s2.data()}; + std::vector out(1, 1); + std::vector out_s(1, 1); + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} +TEST(Parallel_MPI_count, sz_36) { + // Create data// + boost::mpi::communicator com; + std::string s1 = get_random_str(36); + std::string s2 = get_random_str(36); + std::vector in{s1.data(), s2.data()}; + std::vector out(1, 1); + std::vector out_s(1, 1); + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} + +TEST(Parallel_MPI_count, sz_24) { + // Create data + boost::mpi::communicator com; + std::vector out_s(1, 1); + std::string s1 = get_random_str(24); + std::string s2 = get_random_str(24); + + std::vector in{s1.data(), s2.data()}; + std::vector out(1, 1); + + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + 
taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} +TEST(Parallel_MPI_count, df_sz_15) { + // Create data// + boost::mpi::communicator com; + std::string s1 = get_random_str(12); + std::string s2 = get_random_str(12); + + std::vector in{s1.data(), s2.data()}; + std::vector out(1, 1); + std::vector out_s(1, 1); + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} +TEST(Parallel_MPI_count, df_sz_25) { + // Create data// + boost::mpi::communicator com; + std::string s1 = get_random_str(13); + std::string s2 = get_random_str(12); + + std::vector in{s1.data(), s2.data()}; + std::vector out(1, 1); + std::vector out_s(1, 1); + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); + ASSERT_EQ(testClassPar.validation(), true); + testClassPar.pre_processing(); + testClassPar.run(); + 
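// Editor's illustration (not part of the original patch): the counting rule described in the
// commit message above, as a self-contained sketch. `a` plays the role of the longer string;
// each mismatch in the common prefix counts twice, and every leftover character of the longer
// string counts once. The lambda and its name are hypothetical, added only for clarity.
auto mismatch_count = [](const std::string &a, const std::string &b) {
  int cnt = 0;
  for (size_t i = 0; i < b.size(); i++) {
    if (a[i] != b[i]) cnt += 2;  // a differing position contributes 2, one per string
  }
  return cnt + static_cast<int>(a.size() - b.size());  // tail of the longer string, 1 per char
};
EXPECT_EQ(mismatch_count("qwert", "qello"), 8);  // same pair as the Sequential_count.ans_8 test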
testClassPar.post_processing(); + + if (com.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); + taskDataSeq->outputs_count.emplace_back(out_s.size()); + Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); + ASSERT_EQ(testClassSeq.validation(), true); + testClassSeq.pre_processing(); + testClassSeq.run(); + testClassSeq.post_processing(); + ASSERT_EQ(out[0], out_s[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp new file mode 100644 index 00000000000..61b8a46297b --- /dev/null +++ b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp @@ -0,0 +1,40 @@ + +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" +namespace Odintsov_M_CountingMismatchedCharactersStr_mpi { + +class CountingCharacterMPISequential : public ppc::core::Task { + public: + explicit CountingCharacterMPISequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input; + int ans{}; +}; + +class CountingCharacterMPIParallel : public ppc::core::Task { + public: + explicit CountingCharacterMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector local_input; + std::vector input; + int ans{}; + boost::mpi::communicator com; +}; +} // namespace Odintsov_M_CountingMismatchedCharactersStr_mpi \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp new file mode 100644 index 00000000000..fea00e10845 --- /dev/null +++ b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp @@ -0,0 +1,90 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" + +TEST(MPI_parallel_perf_test, my_test_pipeline_run) { + boost::mpi::communicator com; + char str1[] = "qbrkyndjjobh"; + char str2[] = "qellowhwmvpt"; + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create Task Data Parallel + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + // Create Task + auto testClassPar = + std::make_shared(taskDataPar); + ASSERT_EQ(testClassPar->validation(), true); + testClassPar->pre_processing(); + testClassPar->run(); + testClassPar->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return 
current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testClassPar); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (com.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10, out[0]); + } +} +TEST(MPI_parallel_perf_test, my_test_task_run) { + boost::mpi::communicator com; + char str1[] = "qbrkyndjjobh"; + char str2[] = "qellowhwmvpt"; + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create Task Data Parallel// + std::shared_ptr taskDataPar = std::make_shared(); + if (com.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + // Create Task + auto testClassPar = + std::make_shared(taskDataPar); + ASSERT_EQ(testClassPar->validation(), true); + testClassPar->pre_processing(); + testClassPar->run(); + testClassPar->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testClassPar); + perfAnalyzer->task_run(perfAttr, perfResults); + if (com.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp new file mode 100644 index 00000000000..a6952bdfa5a --- /dev/null +++ b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp @@ -0,0 +1,137 @@ + +#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" + +#include +#include +#include +#include + +using namespace std::chrono_literals; +using namespace Odintsov_M_CountingMismatchedCharactersStr_mpi; + +// Sequential version +bool CountingCharacterMPISequential::validation() { + internal_order_test(); + // Check that there are exactly 2 strings on input and one number on output + bool ans_out = (taskData->inputs_count[0] == 2); + bool ans_in = (taskData->outputs_count[0] == 1); + return (ans_in) && (ans_out); +} +bool CountingCharacterMPISequential::pre_processing() { + internal_order_test(); + // Input initialization: keep the longer string first + if (strlen(reinterpret_cast(taskData->inputs[0])) >= strlen(reinterpret_cast(taskData->inputs[1]))) { + input.push_back(reinterpret_cast(taskData->inputs[0])); + input.push_back(reinterpret_cast(taskData->inputs[1])); + } else { + input.push_back(reinterpret_cast(taskData->inputs[1])); + input.push_back(reinterpret_cast(taskData->inputs[0])); + } + // Initialize the answer + ans = 0; + return true; +} +bool CountingCharacterMPISequential::run() { + internal_order_test(); + auto *it1 = input[0]; + auto *it2 = input[1]; + while (*it1 != '\0' && *it2 != '\0') { + if (*it1 != *it2) { + ans += 2; + } + ++it1; + ++it2; + } + ans += std::strlen(it1) + std::strlen(it2); // each leftover character of the longer string counts once + return true; +} +bool CountingCharacterMPISequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = ans; + return true; +} +// 
Parallel version +bool CountingCharacterMPIParallel::validation() { + internal_order_test(); + // Check that there are exactly 2 strings on input and one number on output + if (com.rank() == 0) { + bool ans_out = (taskData->inputs_count[0] == 2); + bool ans_in = (taskData->outputs_count[0] == 1); + return (ans_in) && (ans_out); + } + return true; +} + +bool CountingCharacterMPIParallel::pre_processing() { + internal_order_test(); + if (com.rank() == 0) { + // Input initialization: keep the longer string first + if (strlen(reinterpret_cast(taskData->inputs[0])) >= + strlen(reinterpret_cast(taskData->inputs[1]))) { + input.push_back(reinterpret_cast(taskData->inputs[0])); + input.push_back(reinterpret_cast(taskData->inputs[1])); + } else { + input.push_back(reinterpret_cast(taskData->inputs[1])); + input.push_back(reinterpret_cast(taskData->inputs[0])); + } + // If the strings differ in length, every extra character of the longer one is a mismatch; + // the input buffers are left untouched so the reference sequential task can reuse them + ans = strlen(input[0]) - strlen(input[1]); + } + return true; +} +bool CountingCharacterMPIParallel::run() { + internal_order_test(); + // Data distribution + size_t loc_size = 0; + size_t cmp_len = 0; // length of the common part compared character by character + // Initialization on process 0 + if (com.rank() == 0) { + cmp_len = strlen(input[1]); // input[1] is never longer than input[0] + loc_size = (cmp_len + com.size() - 1) / + com.size(); // Round up so the chunks cover the whole common part + } + broadcast(com, loc_size, 0); + broadcast(com, cmp_len, 0); + if (com.rank() == 0) { + for (int pr = 1; pr < com.size(); pr++) { + size_t offset = pr * loc_size; + size_t send_size = + (offset < cmp_len) ? std::min(loc_size, cmp_len - offset) : 0; // Clamp the amount of data sent + if (send_size > 0) { + com.send(pr, 0, input[0] + offset, send_size); + com.send(pr, 0, input[1] + offset, send_size); + } + } + } + if (com.rank() == 0) { + std::string str1(input[0], loc_size); + std::string str2(input[1], loc_size); + local_input.push_back(str1); + local_input.push_back(str2); + } else { + size_t offset = com.rank() * loc_size; + size_t recv_size = (offset < cmp_len) ? std::min(loc_size, cmp_len - offset) : 0; // mirrors the sender's clamp + std::string str1(recv_size, '0'); + std::string str2(recv_size, '0'); + if (recv_size > 0) { + com.recv(0, 0, str1.data(), recv_size); + com.recv(0, 0, str2.data(), recv_size); + } + local_input.push_back(str1); + local_input.push_back(str2); + } + size_t size_1 = local_input[0].size(); + // Counting + int loc_res = 0; + for (size_t i = 0; i < size_1; i++) { + if (local_input[0][i] != local_input[1][i]) { + loc_res += 2; + } + } + int mismatches = 0; + reduce(com, loc_res, mismatches, std::plus(), 0); + if (com.rank() == 0) { + ans += mismatches; // keep the length difference computed in pre_processing + } + return true; +} + +bool CountingCharacterMPIParallel::post_processing() { + internal_order_test(); + if (com.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = ans; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp new file mode 100644 index 00000000000..76bf51d46c3 --- /dev/null +++ b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp @@ -0,0 +1,108 @@ + +#include + +#include + +#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" + +TEST(Sequential_count, ans_8) { + // Create data + + char str1[] = "qwert"; + char str2[] = "qello"; + + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData// + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create 
Task + Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); + ASSERT_EQ(testClass.validation(), true); + testClass.pre_processing(); + testClass.run(); + testClass.post_processing(); + + ASSERT_EQ(8, out[0]); +} + +TEST(Sequential_count, ans_0) { + // Create data + char str1[] = "qwert"; + char str2[] = "qwert"; + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); + ASSERT_EQ(testClass.validation(), true); + testClass.pre_processing(); + testClass.run(); + testClass.post_processing(); + + ASSERT_EQ(0, out[0]); +} +TEST(Sequential_count, ans_10) { + // Create data + char str1[] = "qwert"; + + char str2[] = "asdfg"; + + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); + ASSERT_EQ(testClass.validation(), true); + testClass.pre_processing(); + testClass.run(); + testClass.post_processing(); + + ASSERT_EQ(10, out[0]); +} +TEST(Sequential_count, ans_11) { + // Create data + char str1[] = "qwerta"; + char str2[] = "asdfg"; + + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); + ASSERT_EQ(testClass.validation(), true); + testClass.pre_processing(); + testClass.run(); + testClass.post_processing(); + ASSERT_EQ(11, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp new file mode 100644 index 00000000000..db67e1d7825 --- /dev/null +++ b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp @@ -0,0 +1,23 @@ + +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" +namespace Odintsov_M_CountingMismatchedCharactersStr_seq { + +class CountingCharacterSequential : public ppc::core::Task { + public: + explicit CountingCharacterSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input; + int ans{}; +}; + +} // namespace 
Odintsov_M_CountingMismatchedCharactersStr_seq \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp new file mode 100644 index 00000000000..6a57c5a9b47 --- /dev/null +++ b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp @@ -0,0 +1,88 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" +TEST(sequential_my_perf_test, my_test_pipeline_run) { + // Create data + char str1[] = "qwert"; + char str2[] = "qello"; + + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testClass = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 15; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testClass); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(8, out[0]); +} + +TEST(sequential_my_perf_test, test_task_run) { + char str1[] = "qwert"; + char str2[] = "qello"; + + std::vector in{str1, str2}; + std::vector out(1, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testClass = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 15; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testClass); + perfAnalyzer->task_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(8, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp new file mode 100644 index 00000000000..91571ced318 --- /dev/null +++ b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp @@ -0,0 +1,48 @@ + +#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" + +#include 
+#include + +using namespace std::chrono_literals; + +bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::validation() { + internal_order_test(); + // Check that there are exactly 2 strings on input and one number on output + bool ans_out = (taskData->inputs_count[0] == 2); + bool ans_in = (taskData->outputs_count[0] == 1); + return (ans_in) && (ans_out); +} +bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::pre_processing() { + internal_order_test(); + // Input initialization: keep the longer string first + if (strlen(reinterpret_cast(taskData->inputs[0])) >= strlen(reinterpret_cast(taskData->inputs[1]))) { + input.push_back(reinterpret_cast(taskData->inputs[0])); + input.push_back(reinterpret_cast(taskData->inputs[1])); + } else { + input.push_back(reinterpret_cast(taskData->inputs[1])); + input.push_back(reinterpret_cast(taskData->inputs[0])); + } + // Initialize the answer + ans = 0; + return true; +} +bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::run() { + internal_order_test(); + auto *it1 = input[0]; + auto *it2 = input[1]; + while (*it1 != '\0' && *it2 != '\0') { + if (*it1 != *it2) { + ans += 2; + } + ++it1; + ++it2; + } + ans += std::strlen(it1) + std::strlen(it2); // each leftover character of the longer string counts once + return true; +} +bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = ans; + return true; +} From 334bdd28ad760ef7859dbf9d43192881eaf448bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=95=D0=BB=D0=B8=D0=B7=D0=B0=D0=B2=D0=B5=D1=82=D0=B0=20?= =?UTF-8?q?=D0=A4=D1=80=D0=BE=D0=BB=D0=BE=D0=B2=D0=B0?= <113036050+ElizavetaFrolova@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:40:12 +0300 Subject: [PATCH 095/155] =?UTF-8?q?=D0=A4=D1=80=D0=BE=D0=BB=D0=BE=D0=B2?= =?UTF-8?q?=D0=B0=20=D0=95=D0=BB=D0=B8=D0=B7=D0=B0=D0=B2=D0=B5=D1=82=D0=B0?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2022.=20=D0=9F=D0=BE=D0=B4=D1=81?= =?UTF-8?q?=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D0=B1?= =?UTF-8?q?=D1=83=D0=BA=D0=B2=D0=B5=D0=BD=D0=BD=D1=8B=D1=85=20=D1=81=D0=B8?= =?UTF-8?q?=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE=D0=B2=20=D0=B2=20=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=BE=D0=BA=D0=B5=20(#137)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Description of the sequential solution (seq): the input data is stored in the string input_. A loop walks over the string and, whenever an alphabetic character is found, a counter is incremented. Output: a number, the count of alphabetic characters in the string. Description of the parallel task: the input string is divided evenly between the processes. If the string length is not a multiple of the number of processes, the leftover characters are appended to the substring of process zero. Each process counts the alphabetic characters in its part, and these values are then summed (via reduce). 
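For reference, here is a minimal standalone program (an editor's sketch, not part of this patch; the file name and test string are illustrative) showing the distribution scheme described above with boost::mpi: an even share of delta characters per process, the remainder staying with process zero, and a reduce over the per-process counts.

// count_letters_sketch.cpp - illustrates the split described in the commit message
#include <boost/mpi.hpp>

#include <cctype>
#include <iostream>
#include <string>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::string input;
  unsigned int delta = 0;
  if (world.rank() == 0) {
    input = "abc 123 def!";               // any input string
    delta = input.size() / world.size();  // even share per process
  }
  boost::mpi::broadcast(world, delta, 0);

  std::string local;
  if (world.rank() == 0) {
    // hand one delta-sized slice to every other process...
    for (int proc = 1; proc < world.size(); proc++) {
      world.send(proc, 0, input.data() + (proc - 1) * delta, delta);
    }
    // ...and keep the last slice plus the leftover characters on process zero
    local.assign(input.begin() + (world.size() - 1) * delta, input.end());
  } else {
    local.resize(delta);
    world.recv(0, 0, local.data(), delta);
  }

  int local_letters = 0;
  for (char c : local) {
    if (std::isalpha(static_cast<unsigned char>(c)) != 0) local_letters++;
  }

  int total = 0;
  boost::mpi::reduce(world, local_letters, total, std::plus<>(), 0);
  if (world.rank() == 0) std::cout << "letters: " << total << std::endl;
  return 0;
}

Run with, e.g., mpirun -np 4 ./count_letters_sketch; because the remainder is absorbed by process zero, the total matches a plain sequential std::isalpha count for any process count.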
--- .../func_tests/main.cpp | 204 ++++++++++++++++++ .../include/ops_mpi.hpp | 45 ++++ .../perf_tests/main.cpp | 110 ++++++++++ .../frolova_e_num_of_letters/src/ops_mpi.cpp | 95 ++++++++ .../func_tests/main.cpp | 152 +++++++++++++ .../include/ops_seq.hpp | 26 +++ .../perf_tests/main.cpp | 103 +++++++++ .../frolova_e_num_of_letters/src/ops_seq.cpp | 38 ++++ 8 files changed, 773 insertions(+) create mode 100644 tasks/mpi/frolova_e_num_of_letters/func_tests/main.cpp create mode 100644 tasks/mpi/frolova_e_num_of_letters/include/ops_mpi.hpp create mode 100644 tasks/mpi/frolova_e_num_of_letters/perf_tests/main.cpp create mode 100644 tasks/mpi/frolova_e_num_of_letters/src/ops_mpi.cpp create mode 100644 tasks/seq/frolova_e_num_of_letters/func_tests/main.cpp create mode 100644 tasks/seq/frolova_e_num_of_letters/include/ops_seq.hpp create mode 100644 tasks/seq/frolova_e_num_of_letters/perf_tests/main.cpp create mode 100644 tasks/seq/frolova_e_num_of_letters/src/ops_seq.cpp diff --git a/tasks/mpi/frolova_e_num_of_letters/func_tests/main.cpp b/tasks/mpi/frolova_e_num_of_letters/func_tests/main.cpp new file mode 100644 index 00000000000..9b4ee0d493b --- /dev/null +++ b/tasks/mpi/frolova_e_num_of_letters/func_tests/main.cpp @@ -0,0 +1,204 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/frolova_e_num_of_letters/include/ops_mpi.hpp" + +std::string GenStr(int n) { + if (n <= 0) { + return std::string(); + } + std::string str = "test"; + std::string result; + result.resize(n); + + int i = 0; + size_t j = 0; + + while (i < n) { + result[i] = str[j]; + j++; + i++; + if (j >= str.size()) { + j = 0; + } + } + return result; +} + +TEST(frolova_e_num_of_letters_mpi, returns_empty_str_) { + std::string str = GenStr(-2); + EXPECT_TRUE(str.empty()); + std::string str2 = GenStr(0); + EXPECT_TRUE(str2.empty()); +} + +TEST(frolova_e_num_of_letters_mpi, Test_100_symbols) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_ = 100; + global_str = GenStr(count_size_); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + frolova_e_num_of_letters_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + // Create Task + frolova_e_num_of_letters_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(frolova_e_num_of_letters_mpi, Test_with_number) { + boost::mpi::communicator 
world; + std::string global_str; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_ = 240; + global_str = GenStr(count_size_); + global_str.push_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + frolova_e_num_of_letters_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + // Create Task + frolova_e_num_of_letters_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(frolova_e_num_of_letters_mpi, Test_only_numbers) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_str = "1234567890"; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + frolova_e_num_of_letters_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + // Create Task + frolova_e_num_of_letters_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(frolova_e_num_of_letters_mpi, Test_empty_str) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + frolova_e_num_of_letters_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(frolova_e_num_of_letters_mpi, Test_different_symbols) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_str = "1234567890;;-a"; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + frolova_e_num_of_letters_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + // Create Task + frolova_e_num_of_letters_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/frolova_e_num_of_letters/include/ops_mpi.hpp b/tasks/mpi/frolova_e_num_of_letters/include/ops_mpi.hpp new file mode 100644 index 00000000000..0dc705a1b0e --- /dev/null +++ b/tasks/mpi/frolova_e_num_of_letters/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace frolova_e_num_of_letters_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + std::string local_input_{}; + int res{}; + boost::mpi::communicator world; +}; +} // namespace frolova_e_num_of_letters_mpi \ No newline at end of file diff --git a/tasks/mpi/frolova_e_num_of_letters/perf_tests/main.cpp b/tasks/mpi/frolova_e_num_of_letters/perf_tests/main.cpp new file mode 100644 index 00000000000..7185d29716e --- /dev/null +++ b/tasks/mpi/frolova_e_num_of_letters/perf_tests/main.cpp @@ -0,0 +1,110 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include 
"core/perf/include/perf.hpp" +#include "mpi/frolova_e_num_of_letters/include/ops_mpi.hpp" + +std::string GenStr(int n) { + if (n <= 0) { + return std::string(); + } + std::string str = "test"; + std::string result; + result.resize(n); + + int i = 0; + size_t j = 0; + + while (i < n) { + result[i] = str[j]; + j++; + i++; + if (j >= str.size()) { + j = 0; + } + } + return result; +} + +TEST(frolova_e_num_of_letters, test_pipeline_run) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_; + if (world.rank() == 0) { + count_size_ = 120; + global_str = GenStr(count_size_); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_, global_sum[0]); + } +} + +TEST(frolova_e_num_of_letters, test_task_run) { + boost::mpi::communicator world; + std::string global_str; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_; + if (world.rank() == 0) { + count_size_ = 120; + global_str = GenStr(count_size_); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_, global_sum[0]); + } +} diff --git a/tasks/mpi/frolova_e_num_of_letters/src/ops_mpi.cpp b/tasks/mpi/frolova_e_num_of_letters/src/ops_mpi.cpp new file mode 100644 index 00000000000..56e33530c4b --- /dev/null +++ b/tasks/mpi/frolova_e_num_of_letters/src/ops_mpi.cpp @@ -0,0 +1,95 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/frolova_e_num_of_letters/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + 
+bool frolova_e_num_of_letters_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + // Init value for output + res = 0; + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (char c : input_) { + if (static_cast(isalpha(c))) res++; + } + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + // Init vectors + input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + } + res = 0; + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int delta = 0; + + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + (proc - 1) * delta, delta); + } + local_input_ = std::string(input_.begin() + (world.size() - 1) * delta, input_.end()); + } else { + local_input_.resize(delta); + world.recv(0, 0, local_input_.data(), delta); + } + int local_res = 0; + for (char c : local_input_) { + if (static_cast(isalpha(c))) { + local_res++; + } + } + + reduce(world, local_res, res, std::plus(), 0); + return true; +} + +bool frolova_e_num_of_letters_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/frolova_e_num_of_letters/func_tests/main.cpp b/tasks/seq/frolova_e_num_of_letters/func_tests/main.cpp new file mode 100644 index 00000000000..a6e1ce169ce --- /dev/null +++ b/tasks/seq/frolova_e_num_of_letters/func_tests/main.cpp @@ -0,0 +1,152 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/frolova_e_num_of_letters/include/ops_seq.hpp" + +std::string GenStr(int n) { + if (n <= 0) { + return std::string(); + } + std::string str = "test"; + std::string result; + result.resize(n); + + int i = 0; + size_t j = 0; + + while (i < n) { + result[i] = str[j]; + j++; + i++; + if (j >= str.size()) { + j = 0; + } + } + return result; +} + +TEST(frolova_e_num_of_letters_seq, returns_empty_str_) { + std::string str = GenStr(-2); + EXPECT_TRUE(str.empty()); + std::string str2 = GenStr(0); + EXPECT_TRUE(str2.empty()); +} + +TEST(frolova_e_num_of_letters_seq, returns__str_) { + std::string str = GenStr(2); + unsigned long size = 2; + ASSERT_EQ(str.size(), size); +} + +TEST(frolova_e_num_of_letters_seq, empty_str_test) { + std::string str; + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr 
taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + frolova_e_num_of_letters_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(frolova_e_num_of_letters_seq, str_without_letters_test) { + std::string str = "0"; + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + frolova_e_num_of_letters_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(frolova_e_num_of_letters_seq, str_with_one_letter_test) { + std::string str = "a"; + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + frolova_e_num_of_letters_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(frolova_e_num_of_letters_seq, str_with_letters_test) { + std::string str = "test"; + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + frolova_e_num_of_letters_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(4, out[0]); +} + +TEST(frolova_e_num_of_letters_seq, str_with_letters_and_other_symbols_test) { + std::string str = "123test;"; + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + frolova_e_num_of_letters_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(4, out[0]); +} \ No newline at end of file 
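For reference, a minimal self-contained sketch of the counting rule the tests above exercise. It is illustrative only and not part of the change: count_letters is a hypothetical stand-in for the task's Count(), and the cast to unsigned char (which std::isalpha formally requires when char may be negative) is an assumption of this sketch rather than something the patched code does.

    // Illustrative only: the same counting rule, shown as a standalone program.
    #include <cctype>
    #include <iostream>
    #include <string>

    int count_letters(const std::string& s) {
      int count = 0;
      for (char c : s) {
        // Cast to unsigned char first: passing a negative char to std::isalpha
        // is undefined behavior.
        if (std::isalpha(static_cast<unsigned char>(c)) != 0) {
          count++;
        }
      }
      return count;
    }

    int main() {
      std::cout << count_letters("123test;") << '\n';  // prints 4, as the test expects
      return 0;
    }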
diff --git a/tasks/seq/frolova_e_num_of_letters/include/ops_seq.hpp b/tasks/seq/frolova_e_num_of_letters/include/ops_seq.hpp new file mode 100644 index 00000000000..b16888f1ddc --- /dev/null +++ b/tasks/seq/frolova_e_num_of_letters/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace frolova_e_num_of_letters_seq { + +int Count(std::string& str); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int res{}; +}; + +} // namespace frolova_e_num_of_letters_seq \ No newline at end of file diff --git a/tasks/seq/frolova_e_num_of_letters/perf_tests/main.cpp b/tasks/seq/frolova_e_num_of_letters/perf_tests/main.cpp new file mode 100644 index 00000000000..f4dcf5a4f97 --- /dev/null +++ b/tasks/seq/frolova_e_num_of_letters/perf_tests/main.cpp @@ -0,0 +1,103 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/frolova_e_num_of_letters/include/ops_seq.hpp" + +std::string GenStr(int n) { + if (n <= 0) { + return std::string(); + } + std::string str = "test"; + std::string result; + result.resize(n); + + int i = 0; + size_t j = 0; + + while (i < n) { + result[i] = str[j]; + j++; + i++; + if (j >= str.size()) { + j = 0; + } + } + return result; +} + +TEST(frolova_e_num_of_letters_seq, test_pipeline_run) { + std::string str = GenStr(100); + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(100, out[0]); +} + +TEST(frolova_e_num_of_letters_seq, test_task_run) { + std::string str = GenStr(100); + + // Create data + std::vector in(1, str); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + 
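+  // The callback below reports elapsed wall-clock time in seconds: it takes a
+  // nanosecond duration measured from t0 and scales it by 1e-9.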
perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(100, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/frolova_e_num_of_letters/src/ops_seq.cpp b/tasks/seq/frolova_e_num_of_letters/src/ops_seq.cpp
new file mode 100644
index 00000000000..7236b55c63d
--- /dev/null
+++ b/tasks/seq/frolova_e_num_of_letters/src/ops_seq.cpp
@@ -0,0 +1,38 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/frolova_e_num_of_letters/include/ops_seq.hpp"
+
+int frolova_e_num_of_letters_seq::Count(std::string& str) {
+  int count = 0;
+  for (char c : str) {
+    if (static_cast<bool>(isalpha(c))) {
+      count++;
+    }
+  }
+  return count;
+}
+
+bool frolova_e_num_of_letters_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::string(reinterpret_cast<char*>(taskData->inputs[0]), taskData->inputs_count[0]);
+  res = 0;
+  return true;
+}
+
+bool frolova_e_num_of_letters_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+bool frolova_e_num_of_letters_seq::TestTaskSequential::run() {
+  internal_order_test();
+  res = Count(input_);
+  return true;
+}
+
+bool frolova_e_num_of_letters_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}
\ No newline at end of file
From 9a2a0e05951bc3579b7f74efc7e4c3daf10cf22a Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Mon, 4 Nov 2024 11:05:18 +0800
Subject: [PATCH 096/155] Revert "Sozonov Ilya. Task 1. Variant 7. Finding the
 closest neighboring elements of a vector." (#184)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#93

https://github.com/learning-process/ppc-2024-autumn/actions/runs/11657142180/job/32454256839
---
 .../func_tests/main.cpp | 323 ------------------
 .../include/ops_mpi.hpp |  45 ---
 .../perf_tests/main.cpp |  95 ------
 .../src/ops_mpi.cpp     | 108 ------
 .../func_tests/main.cpp | 153 ---------
 .../include/ops_seq.hpp |  23 --
 .../perf_tests/main.cpp |  87 -----
 .../src/ops_seq.cpp     |  43 ---
 8 files changed, 877 deletions(-)
 delete mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp
 delete mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp
delete mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp delete mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index 6d96709c987..00000000000 --- a/tasks/mpi/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,323 +0,0 @@ -#include - -#include -#include -#include -#include - -#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" - -namespace sozonov_i_nearest_neighbor_elements_mpi { - -std::vector getRandomVector(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = gen() % 100; - } - return vec; -} - -} // namespace sozonov_i_nearest_neighbor_elements_mpi - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_for_empty_vector) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_FALSE(testMpiTaskParallel.validation()); - } -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_10_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 10; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - ans = {1, 1}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, ans); - ASSERT_EQ(global_ans, ans); - 
} -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_50_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 50; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - ans = {1, 1}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, ans); - ASSERT_EQ(global_ans, ans); - } -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_500_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 500; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - ans = {1, 1}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, ans); - 
ASSERT_EQ(global_ans, ans); - } -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_on_1000_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 1000; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - ans = {1, 1}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, ans); - ASSERT_EQ(global_ans, ans); - } -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_random_on_500_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 500; - global_vec = sozonov_i_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, global_ans); - } -} - 
-TEST(sozonov_i_nearest_neighbor_elements_mpi, test_random_on_1000_elements) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int count_size_vector = 1000; - global_vec = sozonov_i_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_sum(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); - taskDataSeq->outputs_count.emplace_back(reference_sum.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_sum, global_ans); - } -} \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp deleted file mode 100644 index 91fa439a73b..00000000000 --- a/tasks/mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace sozonov_i_nearest_neighbor_elements_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - std::vector> diff, local_input_; - std::pair res; - boost::mpi::communicator world; -}; - -} // namespace sozonov_i_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index 5042f0371a8..00000000000 --- a/tasks/mpi/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,95 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" - 
-TEST(sozonov_i_nearest_neighbor_elements_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans(2, 1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 1000000; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, global_ans); - } -} - -TEST(sozonov_i_nearest_neighbor_elements_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_ans(2, 0); - std::vector ans(2, 1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 10000000; - global_vec = std::vector(count_size_vector); - std::iota(global_vec.begin(), global_vec.end(), 0); - global_vec[0] = 1; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_ans.data())); - taskDataPar->outputs_count.emplace_back(global_ans.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, global_ans); - } -} \ No newline at end of file diff --git a/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp deleted file mode 100644 index 7ed936e151b..00000000000 --- a/tasks/mpi/sozonov_i_nearest_neighbor_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include "mpi/sozonov_i_nearest_neighbor_elements/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool 
sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; ++i) { - input_[i] = tmp_ptr[i]; - } - // Init value for output - res = 0; - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - // Check count elements of input and output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { - internal_order_test(); - int min = INT_MAX; - for (size_t i = 0; i < input_.size() - 1; i++) { - if (abs(input_[i + 1] - input_[i]) < min) { - min = abs(input_[i + 1] - input_[i]); - res = i; - } - } - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = input_[res]; - reinterpret_cast(taskData->outputs[0])[1] = input_[res + 1]; - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - if (world.rank() == 0) { - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; ++i) { - input_[i] = tmp_ptr[i]; - } - } - // Init value for output - res = {INT_MAX, -1}; - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of input and output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; - } - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { - internal_order_test(); - unsigned int delta = 0; - if (world.rank() == 0) { - delta = (taskData->inputs_count[0] - 1) / world.size(); - } - broadcast(world, delta, 0); - if (world.rank() == 0) { - diff = std::vector>(taskData->inputs_count[0] - 1); - for (size_t i = 0; i < input_.size() - 1; ++i) { - diff[i] = {abs(input_[i + 1] - input_[i]), i}; - } - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, diff.data() + proc * delta, delta); - } - } - local_input_ = std::vector>(delta); - if (world.rank() == 0) { - local_input_ = std::vector>(diff.begin(), diff.begin() + delta); - } else { - world.recv(0, 0, local_input_.data(), delta); - } - std::pair local_res(INT_MAX, 0); - local_res = *std::min_element(local_input_.begin(), local_input_.end()); - reduce(world, local_res, res, boost::mpi::minimum>(), 0); - return true; -} - -bool sozonov_i_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = input_[res.second]; - reinterpret_cast(taskData->outputs[0])[1] = input_[res.second + 1]; - } - return true; -} diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index 3c0c892be20..00000000000 --- a/tasks/seq/sozonov_i_nearest_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,153 +0,0 @@ -#include - -#include -#include - -#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" - 
-TEST(sozonov_i_nearest_neighbor_elements_seq, test_for_empty_vector) { - // Create data - std::vector in; - std::vector out(2, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_FALSE(testTaskSequential.validation()); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_10) { - const int count = 10; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_20) { - const int count = 20; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_50) { - const int count = 50; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_70) { - const int count = 70; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_nearest_neighbor_elements_100) { - const int count = 100; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out); -} \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp deleted file mode 100644 index 12606c9cde9..00000000000 --- a/tasks/seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace sozonov_i_nearest_neighbor_elements_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; -}; - -} // namespace sozonov_i_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index 1aeffb7978d..00000000000 --- a/tasks/seq/sozonov_i_nearest_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_pipeline_run) { - const int count = 10000000; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - 
perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, out); -} - -TEST(sozonov_i_nearest_neighbor_elements_seq, test_task_run) { - const int count = 10000000; - - // Create data - std::vector in(count); - std::iota(in.begin(), in.end(), 0); - in[0] = 1; - std::vector out(2, 0); - std::vector ans(2, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, out); -} \ No newline at end of file diff --git a/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp deleted file mode 100644 index 95677138ddd..00000000000 --- a/tasks/seq/sozonov_i_nearest_neighbor_elements/src/ops_seq.cpp +++ /dev/null @@ -1,43 +0,0 @@ -#include "seq/sozonov_i_nearest_neighbor_elements/include/ops_seq.hpp" - -#include - -using namespace std::chrono_literals; - -bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - // Init value for output - res = 0; - return true; -} - -bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::validation() { - internal_order_test(); - // Check count elements of input and output - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 2; -} - -bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::run() { - internal_order_test(); - int min = INT_MAX; - for (size_t i = 0; i < input_.size() - 1; ++i) { - if (abs(input_[i + 1] - input_[i]) < min) { - min = abs(input_[i + 1] - input_[i]); - res = i; - } - } - return true; -} - -bool sozonov_i_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = input_[res]; - reinterpret_cast(taskData->outputs[0])[1] = input_[res + 1]; - return true; -} \ No newline at end of file From adae4b6fac4e162591ea476378d3e6db81f8e9e0 Mon Sep 17 00:00:00 2001 
From: Arseniy Obolenskiy
Date: Mon, 4 Nov 2024 11:33:15 +0800
Subject: [PATCH 097/155] Revert "Bessonov Egor. Task 21. Integration - the
 Monte Carlo method." (#185)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#108

https://github.com/learning-process/ppc-2024-autumn/actions/runs/11657351299/job/32454799259
---
 .../func_tests/main.cpp | 265 ------------------
 .../include/ops_mpi.hpp |  48 ----
 .../perf_tests/main.cpp |  81 ------
 .../src/ops_mpi.cpp     |  95 -------
 .../func_tests/main.cpp |  87 ------
 .../include/ops_seq.hpp |  26 --
 .../perf_tests/main.cpp |  60 ----
 .../src/ops_seq.cpp     |  36 ---
 8 files changed, 698 deletions(-)
 delete mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp
 delete mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp
 delete mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp
 delete mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp
 delete mode 100644 tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp
diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp
deleted file mode 100644
index d5f52f88155..00000000000
--- a/tasks/mpi/bessonov_e_integration_monte_carlo/func_tests/main.cpp
+++ /dev/null
@@ -1,265 +0,0 @@
-#include <gtest/gtest.h>
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <random>
-#include <vector>
-
-#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp"
-
-TEST(bessonov_e_integration_monte_carlo_mpi, PositiveRangeTestMPI) {
-  boost::mpi::communicator world;
-  std::vector<double> global_result(1, 0.0);
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-  double a = 0.0;
-  double b = 1.0;
-  int num_points = 1000000;
-  if (world.rank() == 0) {
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&a));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&b));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&num_points));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_result.data()));
-  }
-  bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    std::vector<double> reference_result(1, 0.0);
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&a));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&b));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&num_points));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_result.data()));
-    bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, NegativeRangeTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - double a = -1.0; - double b = 0.0; - int num_points = 100000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, VerySmallRangeTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - double a = 0.1; - double b = 0.11; - int num_points = 100000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 3e-8); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, LongRangeTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - double a = -10.0; - double b = 15.0; - int num_points = 100000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - 
taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_TRUE(testMpiTaskParallel.validation()); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 1e3); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, VeryLongRangeTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - double a = -40.0; - double b = 50.0; - int num_points = 1000000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_TRUE(testMpiTaskParallel.validation()); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 3e4); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, EqualRangeTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - double a = -2.0; - double b = 2.0; - int num_points = 100000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_TRUE(testMpiTaskParallel.validation()); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector 
reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 2e-1); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, RandomTestMPI) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - std::shared_ptr taskDataPar = std::make_shared(); - - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_real_distribution<> dis(-8.0, 8.0); - double a = dis(gen); - double b = dis(gen); - - if (a > b) std::swap(a, b); - - if (a == b) b += 1.0; - - int num_points = 100000; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - - bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_TRUE(testMpiTaskParallel.validation()); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_NEAR(reference_result[0], global_result[0], 2e1); - } -} \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp b/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp deleted file mode 100644 index ce0f4bd1c6e..00000000000 --- a/tasks/mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace bessonov_e_integration_monte_carlo_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - double a, b; - int num_points; - static double exampl_func(double x) { return x * x * x; } - - private: - double res{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - 
bool post_processing() override; - double a, b; - int num_points; - static double exampl_func(double x) { return x * x * x; } - - private: - double res; - boost::mpi::communicator world; -}; - -} // namespace bessonov_e_integration_monte_carlo_mpi \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp deleted file mode 100644 index 03b17694113..00000000000 --- a/tasks/mpi/bessonov_e_integration_monte_carlo/perf_tests/main.cpp +++ /dev/null @@ -1,81 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp" - -TEST(bessonov_e_integration_monte_carlo_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - double a = 0.0; - double b = 2.0; - int num_points = 100000000; - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - std::vector reference_result(1, 0.0); - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); - bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); - } -} - -TEST(bessonov_e_integration_monte_carlo_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector global_result(1, 0.0); - double a = 0.0; - double b = 2.0; - int num_points = 100000000; - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&num_points)); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); 
}; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - double reference_result = 4.0; - ASSERT_NEAR(reference_result, global_result[0], 1e-1); - } -} \ No newline at end of file diff --git a/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp b/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp deleted file mode 100644 index b9fd6d74c15..00000000000 --- a/tasks/mpi/bessonov_e_integration_monte_carlo/src/ops_mpi.cpp +++ /dev/null @@ -1,95 +0,0 @@ -#include "mpi/bessonov_e_integration_monte_carlo/include/ops_mpi.hpp" - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1); -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - a = *reinterpret_cast(taskData->inputs[0]); - b = *reinterpret_cast(taskData->inputs[1]); - num_points = *reinterpret_cast(taskData->inputs[2]); - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::run() { - internal_order_test(); - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_real_distribution<> dis(a, b); - - double sum = 0.0; - for (int i = 0; i < num_points; ++i) { - double x = dis(gen); - sum += exampl_func(x); - } - res = (b - a) * (sum / num_points); - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - if ((taskData->inputs.size() != 3) || (taskData->outputs.size() != 1)) { - return false; - } - num_points = *reinterpret_cast(taskData->inputs[2]); - if (num_points <= 0) { - return false; - } - } - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - if (world.rank() == 0) { - a = *reinterpret_cast(taskData->inputs[0]); - b = *reinterpret_cast(taskData->inputs[1]); - num_points = *reinterpret_cast(taskData->inputs[2]); - } - - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - boost::mpi::broadcast(world, a, 0); - boost::mpi::broadcast(world, b, 0); - boost::mpi::broadcast(world, num_points, 0); - - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_real_distribution<> dis(a, b); - - int remainder = num_points % world.size(); - int num_points_for_process = num_points / world.size() + (world.rank() < remainder ? 
1 : 0); - - double sum = 0.0; - for (int i = 0; i < num_points_for_process; ++i) { - double x = dis(gen); - sum += exampl_func(x); - } - - boost::mpi::reduce(world, sum, res, std::plus<>(), 0); - if (world.rank() == 0) { - res = (b - a) * res / num_points; - } - return true; -} - -bool bessonov_e_integration_monte_carlo_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - *reinterpret_cast(taskData->outputs[0]) = res; - } - return true; -} diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp deleted file mode 100644 index 6327fd4a033..00000000000 --- a/tasks/seq/bessonov_e_integration_monte_carlo/func_tests/main.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include - -#include -#include - -#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp" - -TEST(bessonov_e_integration_monte_carlo_seq, PositiveRangeTest) { - double a = 0.0; - double b = 1.0; - int num_points = 1000000; - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - double output = 0.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); - ASSERT_TRUE(task.validation()); - task.pre_processing(); - task.run(); - task.post_processing(); - double expected_result = 0.25; - ASSERT_NEAR(output, expected_result, 1e-1); -} - -TEST(bessonov_e_integration_monte_carlo_seq, NegativeRangeTest) { - double a = -1.0; - double b = 0.0; - int num_points = 1000000; - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - double output = 0.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); - ASSERT_TRUE(task.validation()); - task.pre_processing(); - task.run(); - task.post_processing(); - double expected_result = -0.25; - ASSERT_NEAR(output, expected_result, 1e-1); -} - -TEST(bessonov_e_integration_monte_carlo_seq, FullRangeTest) { - double a = -1.0; - double b = 2.0; - int num_points = 1000000; - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - double output = 0.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); - ASSERT_TRUE(task.validation()); - task.pre_processing(); - task.run(); - task.post_processing(); - double expected_result = 3.75; - ASSERT_NEAR(output, expected_result, 1e-1); -} - -TEST(bessonov_e_integration_monte_carlo_seq, InputSizeLessThan3) { - auto taskData = std::make_shared(); - double a = 0.0; - double b = 1.0; - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - double output = 0.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); - ASSERT_FALSE(task.validation()); -} - -TEST(bessonov_e_integration_monte_carlo_seq, OutputSizeLessThan1) { - auto taskData = std::make_shared(); - double a = 0.0; - double b = 1.0; - int num_points = 10000; - 
taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - bessonov_e_integration_monte_carlo_seq::TestTaskSequential task(taskData); - ASSERT_FALSE(task.validation()); -} \ No newline at end of file diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp b/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp deleted file mode 100644 index 7165f3ef353..00000000000 --- a/tasks/seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace bessonov_e_integration_monte_carlo_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - double a, b; - int num_points; - static double exampl_func(double x) { return x * x * x; } - - private: - double res{}; -}; - -} // namespace bessonov_e_integration_monte_carlo_seq \ No newline at end of file diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp deleted file mode 100644 index 6e843e530ff..00000000000 --- a/tasks/seq/bessonov_e_integration_monte_carlo/perf_tests/main.cpp +++ /dev/null @@ -1,60 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp" - -TEST(bessonov_e_integration_monte_carlo_seq, TestPipelineRun) { - double a = 0.0; - double b = 2.0; - int num_points = 10000000; - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - double output = 1.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - auto testTaskSequential = std::make_shared(taskData); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - double expected_result = 4.0; - ASSERT_NEAR(output, expected_result, 1e-1); -} - -TEST(bessonov_e_integration_monte_carlo_seq, TestTaskRun) { - double a = 0.0; - double b = 2.0; - int num_points = 10000000; - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&num_points)); - double output = 1.0; - taskData->outputs.push_back(reinterpret_cast(&output)); - auto testTaskSequential = std::make_shared(taskData); - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = 
std::chrono::duration_cast(current_time_point - t0).count();
-    return static_cast(duration) * 1e-9;
-  };
-  auto perfResults = std::make_shared();
-  auto perfAnalyzer = std::make_shared(testTaskSequential);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-  ppc::core::Perf::print_perf_statistic(perfResults);
-  double expected_result = 4.0;
-  ASSERT_NEAR(output, expected_result, 1e-1);
-}
diff --git a/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp b/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp
deleted file mode 100644
index eb63e632a6e..00000000000
--- a/tasks/seq/bessonov_e_integration_monte_carlo/src/ops_seq.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include "seq/bessonov_e_integration_monte_carlo/include/ops_seq.hpp"
-
-bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::validation() {
-  internal_order_test();
-  return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1);
-}
-
-bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::pre_processing() {
-  internal_order_test();
-  a = *reinterpret_cast(taskData->inputs[0]);
-  b = *reinterpret_cast(taskData->inputs[1]);
-  num_points = *reinterpret_cast(taskData->inputs[2]);
-  return true;
-}
-
-bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::run() {
-  internal_order_test();
-  std::random_device rd;
-  std::mt19937 gen(rd());
-  std::uniform_real_distribution<> dis(a, b);
-
-  double sum = 0.0;
-  for (int i = 0; i < num_points; ++i) {
-    double x = dis(gen);
-    sum += exampl_func(x);
-  }
-
-  res = (b - a) * (sum / num_points);
-  return true;
-}
-
-bool bessonov_e_integration_monte_carlo_seq::TestTaskSequential::post_processing() {
-  internal_order_test();
-  reinterpret_cast(taskData->outputs[0])[0] = res;
-  return true;
-}

From d692d2e8aca3e9b60ee049b905fff2aff715e11f Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Mon, 4 Nov 2024 21:12:39 +0800
Subject: [PATCH 098/155] =?UTF-8?q?Revert=20"=D0=95=D1=80=D0=BC=D0=B8?=
 =?UTF-8?q?=D0=BB=D0=BE=D0=B2=D0=B0=20=D0=94=D0=B0=D1=80=D1=8C=D1=8F.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2014.=20=D0=9C=D0=B8=D0=BD=D0=B8=D0=BC?=
 =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD?=
 =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B?=
 =?UTF-8?q?."=20(#195)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#86
https://github.com/learning-process/ppc-2024-autumn/actions/runs/11664190768/job/32474194575
---
 .../func_tests/main.cpp | 379 ------------------
 .../include/ops_mpi.hpp |  49 ---
 .../perf_tests/main.cpp | 140 -------
 .../src/ops_mpi.cpp     | 125 ------
 .../func_tests/main.cpp | 236 -----------
 .../include/ops_seq.hpp |  24 --
 .../perf_tests/main.cpp | 127 ------
 .../src/ops_seq.cpp     |  55 ---
 8 files changed, 1135 deletions(-)
 delete mode 100644 tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp
 delete mode 100644 tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp
 delete mode 100644 tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp
 delete mode 100644
tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp b/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp deleted file mode 100644 index e848d6ea49f..00000000000 --- a/tasks/mpi/ermilova_d_min_element_matrix/func_tests/main.cpp +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include -#include - -#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" - -std::vector getRandomVector(int size, int upper_border, int lower_border) { - std::random_device dev; - std::mt19937 gen(dev()); - if (size <= 0) throw "Incorrect size"; - std::vector vec(size); - for (int i = 0; i < size; i++) { - vec[i] = lower_border + gen() % (upper_border - lower_border + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { - if (rows <= 0 || cols <= 0) throw "Incorrect size"; - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(cols, upper_border, lower_border); - } - return vec; -} - -TEST(ermilova_d_min_element_matrix_mpi, Can_create_vector) { - const int size_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_NO_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_mpi, Cant_create_incorrect_size_vector) { - const int size_test = -10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_ANY_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_mpi, Can_create_matrix) { - const int rows_test = 10; - const int cols_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_NO_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_mpi, Cant_create_incorrect_size_matrix) { - const int rows_test = -10; - const int cols_test = 0; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_ANY_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_1x1) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - - const int rows_test = 1; - const int cols_test = 1; - const int upper_border_test = 100; - const int lower_border_test = -100; - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // 
Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task - ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_10x10) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - const int rows_test = 10; - const int cols_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task - ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_100x100) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - const int rows_test = 100; - const int cols_test = 100; - const int upper_border_test = 500; - const int lower_border_test = -500; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - 
taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task - ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_100x50) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - const int rows_test = 100; - const int cols_test = 50; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task - ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_50x100) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - const int rows_test 
= 50; - const int cols_test = 100; - const int upper_border_test = 500; - const int lower_border_test = -500; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task - ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, Matrix_500x500) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - const int rows_test = 500; - const int cols_test = 500; - const int upper_border_test = 500; - const int lower_border_test = -500; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows_test); - taskDataPar->inputs_count.emplace_back(cols_test); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - ermilova_d_min_element_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_min(1, INT_MAX); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); - taskDataSeq->outputs_count.emplace_back(reference_min.size()); - - // Create Task 
- ermilova_d_min_element_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_min[0], global_min[0]); - } -} diff --git a/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp b/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp deleted file mode 100644 index 3b7c7dfaa6d..00000000000 --- a/tasks/mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace ermilova_d_min_element_matrix_mpi { - -// std::vector getRandomVector(int size, int upper_border, int lower_border); -// std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border); - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int res{}; - int cols, rows; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res{}; - int cols, rows; - boost::mpi::communicator world; -}; - -} // namespace ermilova_d_min_element_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp b/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp deleted file mode 100644 index b24b1dbe26f..00000000000 --- a/tasks/mpi/ermilova_d_min_element_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" - -std::vector getRandomVector(int size, int upper_border, int lower_border) { - std::random_device dev; - std::mt19937 gen(dev()); - if (size <= 0) throw "Incorrect size"; - std::vector vec(size); - for (int i = 0; i < size; i++) { - vec[i] = lower_border + gen() % (upper_border - lower_border + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { - if (rows <= 0 || cols <= 0) throw "Incorrect size"; - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(cols, upper_border, lower_border); - } - return vec; -} - -TEST(ermilova_d_min_element_matrix_mpi, test_pipeline_run) { - std::random_device dev; - std::mt19937 gen(dev()); - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - int reference_min = -5000; - - if (world.rank() == 0) { - const int rows = 1000; - const int cols = 1000; - const int upper_border = 1000; - const int lower_border = -1000; - - global_matrix = getRandomMatrix(rows, cols, upper_border, lower_border); - - int rnd_rows = gen() % rows; - int rnd_cols = gen() % cols; - 
global_matrix[rnd_rows][rnd_cols] = reference_min; - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(reference_min, global_min[0]); - } -} - -TEST(ermilova_d_min_element_matrix_mpi, test_task_run) { - std::random_device dev; - std::mt19937 gen(dev()); - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_min(1, INT_MAX); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - int reference_min = -5000; - - if (world.rank() == 0) { - const int rows = 1000; - const int cols = 1000; - const int upper_border = 1000; - const int lower_border = -1000; - - global_matrix = getRandomMatrix(rows, cols, upper_border, lower_border); - int rnd_rows = gen() % rows; - int rnd_cols = gen() % cols; - global_matrix[rnd_rows][rnd_cols] = reference_min; - - for (unsigned int i = 0; i < global_matrix.size(); i++) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - } - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); - taskDataPar->outputs_count.emplace_back(global_min.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(reference_min, global_min[0]); - } -} diff --git a/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp b/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp deleted file mode 100644 index 60c64a89a2e..00000000000 --- a/tasks/mpi/ermilova_d_min_element_matrix/src/ops_mpi.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/ermilova_d_min_element_matrix/include/ops_mpi.hpp" - -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool 
ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - rows = taskData->inputs_count[0]; - cols = taskData->inputs_count[1]; - - input_.resize(rows, std::vector(cols)); - - for (int i = 0; i < rows; i++) { - auto* tpr_ptr = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) { - input_[i][j] = tpr_ptr[j]; - } - } - // Init value for output - res = INT_MAX; - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::run() { - internal_order_test(); - - for (size_t i = 0; i < input_.size(); i++) { - for (size_t j = 0; j < input_[i].size(); j++) { - if (res > input_[i][j]) { - res = input_[i][j]; - } - } - } - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - if (world.rank() == 0) { - rows = taskData->inputs_count[0]; - cols = taskData->inputs_count[1]; - - input_ = std::vector(rows * cols); - - for (int i = 0; i < rows; i++) { - auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); - for (int j = 0; j < cols; j++) { - input_[i * cols + j] = tmp_ptr[j]; - } - } - } - res = INT_MAX; - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of output - return taskData->outputs_count[0] == 1 && !(taskData->inputs.empty()); - } - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::run() { - internal_order_test(); - - unsigned int delta = 0; - unsigned int extra = 0; - - if (world.rank() == 0) { - delta = rows * cols / world.size(); - extra = rows * cols % world.size(); - } - - broadcast(world, delta, 0); - - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + delta * proc + extra, delta); - } - } - - local_input_ = std::vector(delta); - - if (world.rank() == 0) { - local_input_ = std::vector(input_.begin(), input_.begin() + delta + extra); - } else { - world.recv(0, 0, local_input_.data(), delta); - } - - int local_min = INT_MAX; - if (!local_input_.empty()) { - local_min = *std::min_element(local_input_.begin(), local_input_.end()); - } - reduce(world, local_min, res, boost::mpi::minimum(), 0); - return true; -} - -bool ermilova_d_min_element_matrix_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res; - } - return true; -} diff --git a/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp b/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp deleted file mode 100644 index 2f289eaab65..00000000000 --- a/tasks/seq/ermilova_d_min_element_matrix/func_tests/main.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" - -std::vector getRandomVector(int size, int upper_border, int lower_border) { - std::random_device dev; - std::mt19937 gen(dev()); - if (size <= 0) throw 
"Incorrect size"; - std::vector vec(size); - for (int i = 0; i < size; i++) { - vec[i] = lower_border + gen() % (upper_border - lower_border + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { - if (rows <= 0 || cols <= 0) throw "Incorrect size"; - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(cols, upper_border, lower_border); - } - return vec; -} - -TEST(ermilova_d_min_element_matrix_seq, Can_create_vector) { - const int size_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_NO_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_seq, Cant_create_incorrect_vector) { - const int size_test = -10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_ANY_THROW(getRandomVector(size_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_seq, Can_create_matrix) { - const int rows_test = 10; - const int cols_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_NO_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_seq, Cant_create_incorrect_matrix) { - const int rows_test = -10; - const int cols_test = 0; - const int upper_border_test = 100; - const int lower_border_test = -100; - EXPECT_ANY_THROW(getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test)); -} - -TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_1x1) { - const int rows_test = 1; - const int cols_test = 1; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(reference_min, out[0]); -} - -TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_10x10) { - const int rows_test = 10; - const int cols_test = 10; - const int upper_border_test = 100; - const int lower_border_test = -100; - int reference_min = -500; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(reference_min, out[0]); -} - -TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_100x100) { - const int rows_test = 100; - const int cols_test = 100; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(reference_min, out[0]); -} - -TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_50x100) { - const int rows_test = 50; - const int cols_test = 100; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(reference_min, out[0]); -} - -TEST(ermilova_d_min_element_matrix_seq, Test_min_matrix_100x50) { - const int rows_test = 100; - const int cols_test = 50; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - 
std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - ermilova_d_min_element_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(reference_min, out[0]); -} diff --git a/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp b/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp deleted file mode 100644 index ae6a8b3640c..00000000000 --- a/tasks/seq/ermilova_d_min_element_matrix/include/ops_seq.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include "core/task/include/task.hpp" - -namespace ermilova_d_min_element_matrix_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int res{}; - int cols, rows; -}; - -} // namespace ermilova_d_min_element_matrix_seq \ No newline at end of file diff --git a/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp b/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp deleted file mode 100644 index a780c7e7497..00000000000 --- a/tasks/seq/ermilova_d_min_element_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" - -std::vector getRandomVector(int size, int upper_border, int lower_border) { - std::random_device dev; - std::mt19937 gen(dev()); - if (size <= 0) throw "Incorrect size"; - std::vector vec(size); - for (int i = 0; i < size; i++) { - vec[i] = lower_border + gen() % (upper_border - lower_border + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int cols, int upper_border, int lower_border) { - if (rows <= 0 || cols <= 0) throw "Incorrect size"; - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(cols, upper_border, lower_border); - } - return vec; -} - -TEST(ermilova_d_min_element_matrix_seq, test_pipeline_run) { - const int rows_test = 1000; - const int cols_test = 1000; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(reference_min, out[0]); -} - -TEST(ermilova_d_min_element_matrix_seq, test_task_run) { - const int rows_test = 1000; - const int cols_test = 1000; - const int upper_border_test = 1000; - const int lower_border_test = -1000; - int reference_min = -5000; - - // Create data - std::vector> in = getRandomMatrix(rows_test, cols_test, upper_border_test, lower_border_test); - std::vector out(1, INT_MAX); - - std::random_device dev; - std::mt19937 gen(dev()); - int rnd_rows = gen() % rows_test; - int rnd_cols = gen() % cols_test; - in[rnd_rows][rnd_cols] = reference_min; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < in.size(); i++) { - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - } - taskDataSeq->inputs_count.emplace_back(rows_test); - taskDataSeq->inputs_count.emplace_back(cols_test); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(reference_min, out[0]); -} diff --git a/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp b/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp deleted file mode 100644 index e9a5fff0a54..00000000000 --- a/tasks/seq/ermilova_d_min_element_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/ermilova_d_min_element_matrix/include/ops_seq.hpp" - -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool ermilova_d_min_element_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - // Init value for input and output - rows = taskData->inputs_count[0]; - cols = taskData->inputs_count[1]; - - input_.resize(rows, std::vector(cols)); - - for 
(int i = 0; i < rows; i++) {
-    auto* tpr_ptr = reinterpret_cast(taskData->inputs[i]);
-    for (int j = 0; j < cols; j++) {
-      input_[i][j] = tpr_ptr[j];
-    }
-  }
-
-  // Init value for output
-  res = INT_MAX;
-  return true;
-}
-
-bool ermilova_d_min_element_matrix_seq::TestTaskSequential::validation() {
-  internal_order_test();
-  // Check count elements of output
-
-  return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0;
-}
-
-bool ermilova_d_min_element_matrix_seq::TestTaskSequential::run() {
-  internal_order_test();
-  for (size_t i = 0; i < input_.size(); i++) {
-    for (size_t j = 0; j < input_[i].size(); j++) {
-      if (res > input_[i][j]) {
-        res = input_[i][j];
-      }
-    }
-  }
-  return true;
-}
-
-bool ermilova_d_min_element_matrix_seq::TestTaskSequential::post_processing() {
-  internal_order_test();
-  reinterpret_cast(taskData->outputs[0])[0] = res;
-  return true;
-}

From 76d24e3d01e54df93cd4dd92a9419346fd0b4153 Mon Sep 17 00:00:00 2001
From: ARBUZNIJ <120045631+ARBUZNIJ@users.noreply.github.com>
Date: Mon, 4 Nov 2024 21:46:59 +0300
Subject: [PATCH 099/155] =?UTF-8?q?=D0=9A=D0=BE=D0=B2=D0=B0=D0=BB=D0=B5?=
 =?UTF-8?q?=D0=B2=20=D0=9A=D0=BE=D0=BD=D1=81=D1=82=D0=B0=D0=BD=D1=82=D0=B8?=
 =?UTF-8?q?=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?=
 =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=206.=20=D0=9D=D0=B0=D1=85?=
 =?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=87=D0=B8=D1=81?=
 =?UTF-8?q?=D0=BB=D0=B0=20=D0=BD=D0=B0=D1=80=D1=83=D1=88=D0=B5=D0=BD=D0=B8?=
 =?UTF-8?q?=D0=B9=20=D1=83=D0=BF=D0=BE=D1=80=D1=8F=D0=B4=D0=BE=D1=87=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=BD=D0=BE=D1=81=D1=82=D0=B8=20=D1=81=D0=BE=D1=81=D0=B5?=
 =?UTF-8?q?=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD?=
 =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0?=
 =?UTF-8?q?.=20(#129)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The task is to count the number of ordering violations in a numeric vector
(this version reports 0 violations for a vector sorted in ascending order).

The sequential implementation fills the vector with data and then walks it
in a loop, comparing each element with the next one; whenever an element is
greater than the element that follows it, a counter is incremented. In the
parallel version the vector is split into parts and each process handles its
own part: every process counts the violations in its part locally, and the
per-process results are then collected and summed on the root process.
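For orientation before the diffs, the scheme just described can be condensed
into a small self-contained sketch. It is written against Boost.MPI, which the
task code below also uses, but it is an illustration and not the
NumOfOrderlyViolationsPar class added by this patch: the helper name
count_violations is invented for the example, the demo input is arbitrary, and
the one-element overlap between neighboring chunks (so that a pair straddling a
chunk boundary is still compared exactly once) is one possible way to split the
vector; the class below may divide it differently.

#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Counts adjacent pairs (v[i-1], v[i]) with v[i-1] > v[i].
std::size_t count_violations(const std::vector<int>& v) {
  std::size_t n = 0;
  for (std::size_t i = 1; i < v.size(); ++i) {
    if (v[i - 1] > v[i]) ++n;
  }
  return n;
}

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> data;
  if (world.rank() == 0) data = {3, 1, 4, 1, 5, 9, 2, 6};  // 3 violations

  // Rank 0 slices the vector; every chunk after the first repeats the last
  // element of the previous chunk, so no adjacent pair is lost at a boundary.
  std::vector<std::vector<int>> chunks(world.size());
  if (world.rank() == 0) {
    const std::size_t base = data.size() / world.size();
    const std::size_t extra = data.size() % world.size();
    std::size_t pos = 0;
    for (int p = 0; p < world.size(); ++p) {
      const std::size_t len = base + (static_cast<std::size_t>(p) < extra ? 1 : 0);
      const std::size_t begin = (p == 0) ? 0 : pos - 1;  // one-element overlap
      chunks[p].assign(data.begin() + static_cast<std::ptrdiff_t>(begin),
                       data.begin() + static_cast<std::ptrdiff_t>(pos + len));
      pos += len;
    }
  }

  // Each process receives its chunk and counts violations locally.
  std::vector<int> local;
  boost::mpi::scatter(world, chunks, local, 0);
  const std::size_t local_count = count_violations(local);

  // The per-process counts are summed on rank 0, mirroring the
  // reduce(..., std::plus<>(), 0) pattern used throughout these tasks.
  std::size_t total = 0;
  boost::mpi::reduce(world, local_count, total, std::plus<>(), 0);

  if (world.rank() == 0) std::cout << "violations: " << total << '\n';
  return 0;
}

Built against Boost.MPI and run under mpirun with any process count, only rank
0 prints the total; ranks that receive a single element simply contribute zero.
An equally valid design is a disjoint split in which each rank sends its first
element to the preceding rank for the boundary comparison; both give the same
total.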
--- .../func_tests/main.cpp | 241 +++++++++++++++ .../include/header.hpp | 31 ++ .../perf_tests/main.cpp | 279 ++++++++++++++++++ .../src/sourse.cpp | 72 +++++ .../func_tests/main.cpp | 173 +++++++++++ .../include/header.hpp | 26 ++ .../perf_tests/main.cpp | 167 +++++++++++ .../src/sourse.cpp | 42 +++ 8 files changed, 1031 insertions(+) create mode 100644 tasks/mpi/kovalev_k_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/mpi/kovalev_k_num_of_orderly_violations/include/header.hpp create mode 100644 tasks/mpi/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/mpi/kovalev_k_num_of_orderly_violations/src/sourse.cpp create mode 100644 tasks/seq/kovalev_k_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/seq/kovalev_k_num_of_orderly_violations/include/header.hpp create mode 100644 tasks/seq/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/seq/kovalev_k_num_of_orderly_violations/src/sourse.cpp diff --git a/tasks/mpi/kovalev_k_num_of_orderly_violations/func_tests/main.cpp b/tasks/mpi/kovalev_k_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..d4f7ff11fe6 --- /dev/null +++ b/tasks/mpi/kovalev_k_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,241 @@ +#include + +#include "mpi/kovalev_k_num_of_orderly_violations/include/header.hpp" + +TEST(kovalev_k_num_of_orderly_violations_mpi, zero_length) { + std::vector in; + std::vector out; + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + if (world.rank() == 0) { + ASSERT_FALSE(tmpTaskPar.validation()); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_viol_0_int_) { + const size_t length = 100; + std::srand(std::time(nullptr)); + const int alpha = rand(); + std::vector in(length, alpha); + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + size_t result = 0; + if (world.rank() == 0) { + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_100_opposite_sort_int_) { + const size_t length = 100; + std::srand(std::time(nullptr)); + const int alpha = rand(); + std::vector in(length, alpha); + std::vector out(1, 0); + for (size_t i = 0; i < length; i++) { + in[i] = 2 * length - i; + } + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar 
tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + size_t result = length - 1; + if (world.rank() == 0) { + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_10_rand_int_) { + const size_t length = 10; + std::vector in(length); + std::vector out(1, 0); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = rand() * std::pow(-1, rand()); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + if (world.rank() == 0) { + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_10000_rand_int_) { + const size_t length = 10000; + std::vector in(length); + std::vector out(1, 0); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = rand() * std::pow(-1, rand()); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + if (world.rank() == 0) { + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_viol_0_double_) { + const size_t length = 100; + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + const double alpha = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector in(length, alpha); + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + size_t result = 0; + if (world.rank() == 0) { + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_100_opposite_sort_double_) { + const size_t length = 100; + std::srand(std::time(nullptr)); + const double alpha = (static_cast(rand()) - 1) / (RAND_MAX); + std::vector in(length); + std::vector out(1, 0); + in[0] = static_cast(length); + for (size_t i = 1; i < length; i++) { + in[i] = in[i - 1] * alpha; + } + boost::mpi::communicator 
world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + size_t result = length - 1; + if (world.rank() == 0) { + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_10_rand_double_) { + const size_t length = 10; + std::vector in(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + if (world.rank() == 0) { + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, Test_NoOV_len_10000_rand_double_) { + const size_t length = 10000; + std::vector in(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector out(1, 0); + boost::mpi::communicator world; + std::shared_ptr tmpPar = std::make_shared(); + if (world.rank() == 0) { + tmpPar->inputs_count.emplace_back(in.size()); + tmpPar->inputs.emplace_back(reinterpret_cast(in.data())); + tmpPar->outputs.emplace_back(reinterpret_cast(out.data())); + tmpPar->outputs_count.emplace_back(out.size()); + } + kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar tmpTaskPar(tmpPar); + ASSERT_TRUE(tmpTaskPar.validation()); + tmpTaskPar.pre_processing(); + tmpTaskPar.run(); + tmpTaskPar.post_processing(); + if (world.rank() == 0) { + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kovalev_k_num_of_orderly_violations/include/header.hpp b/tasks/mpi/kovalev_k_num_of_orderly_violations/include/header.hpp new file mode 100644 index 00000000000..b1a5a1dbe2f --- /dev/null +++ b/tasks/mpi/kovalev_k_num_of_orderly_violations/include/header.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kovalev_k_num_of_orderly_violations_mpi { +template +class NumOfOrderlyViolationsPar : public ppc::core::Task { + private: + std::vector glob_v; + std::vector loc_v; + size_t n = 0, l_res = 0, g_res = 0; + int rank, size; + boost::mpi::communicator world; + + public: + explicit NumOfOrderlyViolationsPar(std::shared_ptr taskData_) : 
Task(taskData_) {} + bool count_num_of_orderly_violations_mpi(); + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; +}; +} // namespace kovalev_k_num_of_orderly_violations_mpi \ No newline at end of file diff --git a/tasks/mpi/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp b/tasks/mpi/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..2e5909a139c --- /dev/null +++ b/tasks/mpi/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,279 @@ +#include + +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kovalev_k_num_of_orderly_violations/include/header.hpp" + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_pipeline_run) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length; + const int alpha = 1; + if (rank == 0) { + length = 10; + g_vec = std::vector(length, alpha); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_int_10000_perf) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 10000; + if (rank == 0) { + g_vec = std::vector(length); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = rand() * std::pow(-1, rand()); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + for (size_t i = 1; i < length; i++) + if (g_vec[i - 1] > g_vec[i]) res++; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_int_random_size_perf) { + 
boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 9871; + if (rank == 0) { + g_vec = std::vector(length); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = rand() * std::pow(-1, rand()); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + for (size_t i = 1; i < length; i++) + if (g_vec[i - 1] > g_vec[i]) res++; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_int_1000000_perf) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 1000000; + if (rank == 0) { + g_vec = std::vector(length); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = rand() * std::pow(-1, rand()); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + for (size_t i = 1; i < length; i++) + if (g_vec[i - 1] > g_vec[i]) res++; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_double_10000_perf) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 10000; + if (rank == 0) { + g_vec = std::vector(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + 
taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + for (size_t i = 1; i < length; i++) + if (g_vec[i - 1] > g_vec[i]) res++; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_double_random_size_perf) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 9871; + if (rank == 0) { + g_vec = std::vector(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (rank == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + size_t res = 0; + for (size_t i = 1; i < length; i++) + if (g_vec[i - 1] > g_vec[i]) res++; + ASSERT_EQ(res, g_num_viol[0]); + } +} + +TEST(kovalev_k_num_of_orderly_violations_mpi, test_double_1000000_perf) { + boost::mpi::communicator world; + int rank = world.rank(); + std::vector g_vec; + std::vector g_num_viol(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + size_t length = 1000000; + if (rank == 0) { + g_vec = std::vector(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) g_vec[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + taskDataPar->inputs.emplace_back(reinterpret_cast(g_vec.data())); + taskDataPar->inputs_count.emplace_back(g_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(g_num_viol.data())); + taskDataPar->outputs_count.emplace_back(g_num_viol.size()); + } + auto testMpiParallel = + std::make_shared>(taskDataPar); + ASSERT_TRUE(testMpiParallel->validation()); + testMpiParallel->pre_processing(); + testMpiParallel->run(); + testMpiParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); 
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiParallel);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ if (rank == 0) {
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t res = 0;
+ for (size_t i = 1; i < length; i++)
+ if (g_vec[i - 1] > g_vec[i]) res++;
+ ASSERT_EQ(res, g_num_viol[0]);
+ }
+}
diff --git a/tasks/mpi/kovalev_k_num_of_orderly_violations/src/sourse.cpp b/tasks/mpi/kovalev_k_num_of_orderly_violations/src/sourse.cpp
new file mode 100644
index 00000000000..30a8c46aaf8
--- /dev/null
+++ b/tasks/mpi/kovalev_k_num_of_orderly_violations/src/sourse.cpp
@@ -0,0 +1,72 @@
+#include "mpi/kovalev_k_num_of_orderly_violations/include/header.hpp"
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<T>::count_num_of_orderly_violations_mpi() {
+ for (size_t i = 1; i < loc_v.size(); i++) {
+ if (loc_v[i - 1] > loc_v[i]) {
+ l_res++;
+ }
+ }
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<T>::pre_processing() {
+ internal_order_test();
+ g_res = l_res = 0;
+ rank = world.rank();
+ size = world.size();
+ if (rank == 0) {
+ n = taskData->inputs_count[0];
+ glob_v.resize(n);
+ void* ptr_vec = glob_v.data();
+ void* ptr_input = taskData->inputs[0];
+ memcpy(ptr_vec, ptr_input, sizeof(T) * n);
+ }
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<T>::validation() {
+ internal_order_test();
+ if (world.rank() == 0) {
+ if (taskData->inputs.empty() || taskData->outputs.empty() || taskData->inputs_count[0] <= 0 ||
+ taskData->outputs_count[0] != 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<T>::run() {
+ internal_order_test();
+ boost::mpi::broadcast(world, n, 0);
+ int scatter_length = n / size;
+ loc_v.resize(scatter_length);
+ // Prefix offsets of the equal-size chunks (unused by the equal-chunk boost::mpi::scatter below).
+ std::vector<int> sendcounts(size, scatter_length);
+ std::vector<int> displs(size, 0);
+ for (int i = 1; i < size; i++) displs[i] = displs[i - 1] + scatter_length;
+ boost::mpi::scatter(world, glob_v.data(), loc_v.data(), scatter_length, 0);
+ count_num_of_orderly_violations_mpi();
+ boost::mpi::reduce(world, l_res, g_res, std::plus<size_t>(), 0);
+ if (rank == 0) {
+ // Add violations on chunk boundaries and in the tail that was never scattered.
+ for (int i = 1; i < size; i++)
+ if (glob_v[i * (n / size) - 1] > glob_v[i * (n / size)]) g_res++;
+ for (size_t i = n - n % size; i < n; i++)
+ if (glob_v[i - 1] > glob_v[i]) g_res++;
+ }
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<T>::post_processing() {
+ internal_order_test();
+ if (rank == 0) {
+ reinterpret_cast<size_t*>(taskData->outputs[0])[0] = g_res;
+ }
+ return true;
+}
+
+template class kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<int>;
+template class kovalev_k_num_of_orderly_violations_mpi::NumOfOrderlyViolationsPar<double>;
\ No newline at end of file
diff --git a/tasks/seq/kovalev_k_num_of_orderly_violations/func_tests/main.cpp b/tasks/seq/kovalev_k_num_of_orderly_violations/func_tests/main.cpp
new file mode 100644
index 00000000000..2054b723e15
--- /dev/null
+++ b/tasks/seq/kovalev_k_num_of_orderly_violations/func_tests/main.cpp
@@ -0,0 +1,173 @@
+#include <gtest/gtest.h>
+
+#include "seq/kovalev_k_num_of_orderly_violations/include/header.hpp"
+
+TEST(kovalev_k_num_of_orderly_violations_seq, zero_length) {
+ std::vector<int> in;
+ std::vector<size_t> out;
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ taskSeq->inputs_count.emplace_back(in.size());
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+
taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_FALSE(tmpTaskSeq.validation()); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_viol_0_int_) { + const size_t length = 10; + std::srand(std::time(nullptr)); + const int alpha = rand(); + std::vector in(length, alpha); + std::vector out(1, 0); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_len_10_rand_int_) { + const size_t length = 10; + std::vector in(length); + std::vector out(1, 0); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = rand() * std::pow(-1, rand()); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_len_10000_rand_int_) { + const size_t length = 10000; + std::vector in(length); + std::vector out(1, 0); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = rand() * std::pow(-1, rand()); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_viol_0_double_) { + const size_t length = 10; + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + const double alpha = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector in(length, alpha); + std::vector out(1, 0); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + 
tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_len_10_rand_double_) { + const size_t length = 10; + std::vector in(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector out(1, 0); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_len_10000_rand_double) { + const size_t length = 10000; + std::vector in(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector out(1, 0); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); +} + +TEST(kovalev_k_num_of_orderly_violations_seq, Test_NoOV_len_1000000_rand_double) { + const size_t length = 1000000; + std::vector in(length); + auto max = static_cast(1000000); + auto min = static_cast(-1000000); + std::srand(std::time(nullptr)); + for (size_t i = 0; i < length; i++) in[i] = min + static_cast(rand()) / RAND_MAX * (max - min); + std::vector out(1, 0); + std::shared_ptr taskSeq = std::make_shared(); + taskSeq->inputs_count.emplace_back(in.size()); + taskSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskSeq->outputs_count.emplace_back(out.size()); + kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations tmpTaskSeq(taskSeq); + ASSERT_EQ(tmpTaskSeq.validation(), true); + tmpTaskSeq.pre_processing(); + tmpTaskSeq.run(); + tmpTaskSeq.post_processing(); + size_t result = 0; + for (size_t i = 1; i < length; i++) + if (in[i - 1] > in[i]) result++; + ASSERT_EQ(result, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kovalev_k_num_of_orderly_violations/include/header.hpp b/tasks/seq/kovalev_k_num_of_orderly_violations/include/header.hpp new file mode 100644 index 00000000000..b3a7638fca5 --- /dev/null +++ b/tasks/seq/kovalev_k_num_of_orderly_violations/include/header.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kovalev_k_num_of_orderly_violations_seq { + +template +class 
NumOfOrderlyViolations : public ppc::core::Task {
+ private:
+ std::vector<T> v;
+ size_t n, res = 0;
+
+ public:
+ explicit NumOfOrderlyViolations(std::shared_ptr<ppc::core::TaskData> taskData_)
+ : Task(taskData_), n(taskData_->inputs_count[0]) {}
+ bool count_num_of_orderly_violations_seq();
+ bool pre_processing() override;
+ bool validation() override;
+ bool run() override;
+ bool post_processing() override;
+};
+} // namespace kovalev_k_num_of_orderly_violations_seq
\ No newline at end of file
diff --git a/tasks/seq/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp b/tasks/seq/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp
new file mode 100644
index 00000000000..2867a829bd9
--- /dev/null
+++ b/tasks/seq/kovalev_k_num_of_orderly_violations/perf_tests/main.cpp
@@ -0,0 +1,167 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/kovalev_k_num_of_orderly_violations/include/header.hpp"
+
+TEST(kovalev_k_num_of_orderly_violations_seq, test_pipeline_run) {
+ const size_t length = 10;
+ const int alpha = 1;
+ std::vector<int> in(length, alpha);
+ std::vector<size_t> out(1, 0);
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+ taskSeq->inputs_count.emplace_back(in.size());
+ taskSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+ taskSeq->outputs_count.emplace_back(out.size());
+ auto testTaskSequential =
+ std::make_shared<kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<int>>(taskSeq);
+ auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+ perfAttr->num_running = 10;
+ const auto t0 = std::chrono::high_resolution_clock::now();
+ perfAttr->current_timer = [&] {
+ auto current_time_point = std::chrono::high_resolution_clock::now();
+ auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+ return static_cast<double>(duration) * 1e-9;
+ };
+ auto perfResults = std::make_shared<ppc::core::PerfResults>();
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t result = 0;
+ ASSERT_EQ(result, out[0]);
+}
+
+TEST(kovalev_k_num_of_orderly_violations_seq, test_int_10000_perf) {
+ std::vector<int> g_vec;
+ std::vector<size_t> g_num_viol(1, 0);
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ size_t length = 10000;
+ g_vec = std::vector<int>(length);
+ std::srand(std::time(nullptr));
+ for (size_t i = 0; i < length; i++) g_vec[i] = rand() * std::pow(-1, rand());
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(g_vec.data()));
+ taskSeq->inputs_count.emplace_back(g_vec.size());
+ taskSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(g_num_viol.data()));
+ taskSeq->outputs_count.emplace_back(g_num_viol.size());
+ auto testTaskSequential =
+ std::make_shared<kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<int>>(taskSeq);
+ auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+ perfAttr->num_running = 10;
+ const auto t0 = std::chrono::high_resolution_clock::now();
+ perfAttr->current_timer = [&] {
+ auto current_time_point = std::chrono::high_resolution_clock::now();
+ auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+ return static_cast<double>(duration) * 1e-9;
+ };
+ auto perfResults = std::make_shared<ppc::core::PerfResults>();
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t res = 0;
+ for (size_t i = 1; i < length; i++)
+ if (g_vec[i - 1] > g_vec[i]) res++;
+ ASSERT_EQ(res, g_num_viol[0]);
+}
+
+TEST(kovalev_k_num_of_orderly_violations_seq, test_int_1000000_perf) {
+ std::vector<int> g_vec;
+ std::vector<size_t> g_num_viol(1, 0);
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ size_t length = 1000000;
+ g_vec = std::vector<int>(length);
+ std::srand(std::time(nullptr));
+ for (size_t i = 0; i < length; i++) g_vec[i] = rand() * std::pow(-1, rand());
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(g_vec.data()));
+ taskSeq->inputs_count.emplace_back(g_vec.size());
+ taskSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(g_num_viol.data()));
+ taskSeq->outputs_count.emplace_back(g_num_viol.size());
+ auto testTaskSequential =
+ std::make_shared<kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<int>>(taskSeq);
+ auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+ perfAttr->num_running = 10;
+ const auto t0 = std::chrono::high_resolution_clock::now();
+ perfAttr->current_timer = [&] {
+ auto current_time_point = std::chrono::high_resolution_clock::now();
+ auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+ return static_cast<double>(duration) * 1e-9;
+ };
+ auto perfResults = std::make_shared<ppc::core::PerfResults>();
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t res = 0;
+ for (size_t i = 1; i < length; i++)
+ if (g_vec[i - 1] > g_vec[i]) res++;
+ ASSERT_EQ(res, g_num_viol[0]);
+}
+
+TEST(kovalev_k_num_of_orderly_violations_seq, test_double_10000_perf) {
+ std::vector<double> g_vec;
+ std::vector<size_t> g_num_viol(1, 0);
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ size_t length = 10000;
+ double max = 1000000;
+ double min = -1000000;
+ g_vec = std::vector<double>(length);
+ std::srand(std::time(nullptr));
+ for (size_t i = 0; i < length; i++) g_vec[i] = min + static_cast<double>(rand()) / RAND_MAX * (max - min);
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(g_vec.data()));
+ taskSeq->inputs_count.emplace_back(g_vec.size());
+ taskSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(g_num_viol.data()));
+ taskSeq->outputs_count.emplace_back(g_num_viol.size());
+ auto testTaskSequential =
+ std::make_shared<kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<double>>(taskSeq);
+ auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+ perfAttr->num_running = 10;
+ const auto t0 = std::chrono::high_resolution_clock::now();
+ perfAttr->current_timer = [&] {
+ auto current_time_point = std::chrono::high_resolution_clock::now();
+ auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+ return static_cast<double>(duration) * 1e-9;
+ };
+ auto perfResults = std::make_shared<ppc::core::PerfResults>();
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t res = 0;
+ for (size_t i = 1; i < length; i++)
+ if (g_vec[i - 1] > g_vec[i]) res++;
+ ASSERT_EQ(res, g_num_viol[0]);
+}
+
+TEST(kovalev_k_num_of_orderly_violations_seq, test_double_1000000_perf) {
+ std::vector<double> g_vec;
+ std::vector<size_t> g_num_viol(1, 0);
+ std::shared_ptr<ppc::core::TaskData> taskSeq = std::make_shared<ppc::core::TaskData>();
+ size_t length = 1000000;
+ double max = 1000000;
+ double min = -1000000;
+ g_vec = std::vector<double>(length);
+ std::srand(std::time(nullptr));
+ for (size_t i = 0; i < length; i++) g_vec[i] = min + static_cast<double>(rand()) / RAND_MAX * (max - min);
+ taskSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(g_vec.data()));
+ taskSeq->inputs_count.emplace_back(g_vec.size());
+ taskSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(g_num_viol.data()));
+ taskSeq->outputs_count.emplace_back(g_num_viol.size());
+ auto testTaskSequential =
+ std::make_shared<kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<double>>(taskSeq);
+ auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+ perfAttr->num_running = 10;
+ const auto t0 = std::chrono::high_resolution_clock::now();
+ perfAttr->current_timer = [&] {
+ auto current_time_point =
std::chrono::high_resolution_clock::now();
+ auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+ return static_cast<double>(duration) * 1e-9;
+ };
+ auto perfResults = std::make_shared<ppc::core::PerfResults>();
+ auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+ perfAnalyzer->pipeline_run(perfAttr, perfResults);
+ ppc::core::Perf::print_perf_statistic(perfResults);
+ size_t res = 0;
+ for (size_t i = 1; i < length; i++)
+ if (g_vec[i - 1] > g_vec[i]) res++;
+ ASSERT_EQ(res, g_num_viol[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/kovalev_k_num_of_orderly_violations/src/sourse.cpp b/tasks/seq/kovalev_k_num_of_orderly_violations/src/sourse.cpp
new file mode 100644
index 00000000000..ed0e6fe605b
--- /dev/null
+++ b/tasks/seq/kovalev_k_num_of_orderly_violations/src/sourse.cpp
@@ -0,0 +1,42 @@
+#include "seq/kovalev_k_num_of_orderly_violations/include/header.hpp"
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<T>::count_num_of_orderly_violations_seq() {
+ res = 0;
+ for (size_t i = 1; i < n; i++)
+ if (v[i - 1] > v[i]) res++;
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<T>::pre_processing() {
+ internal_order_test();
+ v = std::vector<T>(n);
+ void* ptr_input = taskData->inputs[0];
+ void* ptr_vec = v.data();
+ memcpy(ptr_vec, ptr_input, sizeof(T) * n);
+ res = 0;
+ return true;
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<T>::validation() {
+ internal_order_test();
+ return (taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1 && taskData->inputs_count[0] == n);
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<T>::run() {
+ internal_order_test();
+ return count_num_of_orderly_violations_seq();
+}
+
+template <typename T>
+bool kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<T>::post_processing() {
+ internal_order_test();
+ reinterpret_cast<size_t*>(taskData->outputs[0])[0] = res;
+ return true;
+}
+
+template class kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<int>;
+template class kovalev_k_num_of_orderly_violations_seq::NumOfOrderlyViolations<double>;
\ No newline at end of file
From 80de1cb34939636d460a0e58307e615706c5a66c Mon Sep 17 00:00:00 2001
From: VladislavPoroshin <120133783+VladislavPoroshin@users.noreply.github.com>
Date: Mon, 4 Nov 2024 22:07:21 +0300
Subject: [PATCH 100/155] Poroshin Vladislav. Task 1. Variant 17. Finding the minimum values in each row of a matrix (#140)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Stages of the sequential part:
Prepare the matrix and result structures: a one-dimensional array (the matrix) stores the values to analyze row by row, and a second one-dimensional array stores the results, i.e. the minimum value found in each row.
Fill the matrix: the matrix is initialized with values, for example randomly generated numbers or data loaded from a file; once all elements are filled in correctly, the minimum search can begin.
Search for the minima row by row: each row of the matrix is traversed while tracking its minimum, updating it whenever an element smaller than the current minimum is found.
Store the found values: after the search finishes for all rows, the minima are written to the output array for further processing or output.

Stages of the MPI part:
The root process loads the matrix and distributes its rows among all processes: process 0 loads the matrix from a file or initializes it, then splits the rows between the available processes so that each process works on a unique part of the data.
Each process searches for the minimum values in its own set of rows and stores the results in a local array.
All processes send their results back to the root process, which can be done with reduction-style operations (reduce).
The root process collects the results from all processes and merges them into the final output array (vector).

Conclusion: these stages provide an efficient row-minimum search in both the sequential and the parallel (MPI) mode. The sequential part supplies the basic data-handling logic, while the MPI part spreads the computational load across several processes, which significantly speeds up the program on large inputs.
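To make the row-by-row scan described above concrete, here is a minimal self-contained sketch (the helper name row_minima is illustrative and not part of this patch):

#include <algorithm>
#include <climits>
#include <vector>

// in: an m x n matrix flattened row by row; returns the minimum of each row.
std::vector<int> row_minima(const std::vector<int>& in, int m, int n) {
  std::vector<int> res(m);
  for (int i = 0; i < m; i++) {
    int mn = INT_MAX;
    for (int j = n * i; j < n * i + n; j++) {
      mn = std::min(mn, in[j]);
    }
    res[i] = mn;
  }
  return res;
}

The same inner scan appears in TestMPITaskSequential::run() below; the MPI variant distributes whole rows across processes, applies it to each process's share, and gathers the per-row minima on rank 0.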
--------- Co-authored-by: Vlad Porosjin --- .../func_tests/main.cpp | 378 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 +++ .../perf_tests/main.cpp | 100 +++++ .../src/ops_mpi.cpp | 188 +++++++++ .../func_tests/main.cpp | 257 ++++++++++++ .../include/ops_seq.hpp | 28 ++ .../perf_tests/main.cpp | 93 +++++ .../src/ops_seq.cpp | 74 ++++ 8 files changed, 1167 insertions(+) create mode 100644 tasks/mpi/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/poroshin_v_find_min_val_row_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp create mode 100644 tasks/seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/poroshin_v_find_min_val_row_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp b/tasks/mpi/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..031289d9e1a --- /dev/null +++ b/tasks/mpi/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,378 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp" + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_rand_100_100) { + int n = 100; + int m = 100; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(m, n); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(m_vec, rm_vec); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_2_4_0) { + int n = 5; + int m = 3; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector ans; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = {2, 5, 6, 7, 4, 9, 4, 6, 7, 9, 3, 4, 8, 5, 0}; + ans = {2, 4, 
0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(m_vec, ans); + ASSERT_EQ(rm_vec, ans); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_4_4_2) { + int m = 3; + int n = 6; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector ans; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = {10, 7, 4, 8, 7, 9, 13, 4, 5, 7, 6, 9, 12, 4, 2, 5, 3, 9}; + ans = {4, 4, 2}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(rm_vec, ans); + ASSERT_EQ(m_vec, ans); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_3_4_0_0) { + int m = 4; + int n = 5; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector ans; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = {9, 5, 3, 9, 7, 9, 13, 4, 5, 7, 7, 9, 12, 4, 0, 5, 11, 9, 0, 7}; + ans = {3, 4, 0, 0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + 
taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(rm_vec, ans); + ASSERT_EQ(m_vec, ans); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_rand_10_12) { + int m = 10; + int n = 12; + + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(m, n); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(rm_vec, m_vec); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_rand_10_15) { + int m = 10; + int n = 15; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(m, n); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(rm_vec, m_vec); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_rand_10_2) { + int m = 10; + int n = 2; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(m, n); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + } + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector rm_vec(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(rm_vec.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Task + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(rm_vec, m_vec); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, Test_rand_0_0) { + int m = 0; + int n = 0; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(m, n); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m); + + poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel 
testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp b/tasks/mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..c8938fee7dd --- /dev/null +++ b/tasks/mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace poroshin_v_find_min_val_row_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector gen(int m, int n); // Generate vector (matrix) + + private: + std::vector input_; // Input vector + std::vector res; // Result vector +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; // Input vector + std::vector local_input_; // Local input vector + std::vector res; // Result vector + boost::mpi::communicator world; // MPI communicator +}; + +} // namespace poroshin_v_find_min_val_row_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp b/tasks/mpi/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..129effdb0c1 --- /dev/null +++ b/tasks/mpi/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp" + +TEST(poroshin_v_find_min_val_row_matrix_mpi, test_pipeline_run) { + int m = 1000; + int n = 4000; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = std::vector(m * n, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m_vec.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < m_vec.size(); i++) { + 
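// The matrix was filled entirely with ones, so every per-row minimum must be exactly 1. +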
EXPECT_EQ(1, m_vec[i]); + } + } +} + +TEST(poroshin_v_find_min_val_row_matrix_mpi, test_task_run) { + int m = 1000; + int n = 4000; + boost::mpi::communicator world; + std::vector global_matrix; + std::vector m_vec(m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = std::vector(m * n, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(m_vec.data())); + taskDataPar->outputs_count.emplace_back(m_vec.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < m_vec.size(); i++) { + EXPECT_EQ(1, m_vec[i]); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/poroshin_v_find_min_val_row_matrix/src/ops_mpi.cpp b/tasks/mpi/poroshin_v_find_min_val_row_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..88a18c22457 --- /dev/null +++ b/tasks/mpi/poroshin_v_find_min_val_row_matrix/src/ops_mpi.cpp @@ -0,0 +1,188 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/poroshin_v_find_min_val_row_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::gen(int m, int n) { + std::vector tmp(m * n); + int n1 = std::max(n, m); + int m1 = std::min(n, m); + + for (auto& t : tmp) { + t = n1 + (std::rand() % (m1 - n1 + 7)); + } + + for (int i = 0; i < m; i++) { + tmp[(std::rand() % n) + i * n] = INT_MIN; // in 1 of n columns the value must be INT_MIN (needed to check answer) + } + + return tmp; +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + int m = taskData->inputs_count[0]; + int n = taskData->inputs_count[1]; + int size = m * n; + input_.resize(size); + res.resize(m); + + for (int i = 0; i < size; i++) { + input_[i] = (reinterpret_cast(taskData->inputs[0])[i]); + } + + return true; +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + int m = taskData->inputs_count[0]; + int n = taskData->inputs_count[1]; + + for (int i = 0; i < m; i++) { + int mn = INT_MAX; + for (int j = n * i; j < n * i + n; j++) { + mn = std::min(mn, input_[j]); + } + res[i] = mn; + } + + return true; +} + +bool 
poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + for (size_t i = 0; i < res.size(); i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////// + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + int n = 0; + int m = 0; + int size = 0; + unsigned int delta = 0; + + if (world.rank() == 0) { + m = taskData->inputs_count[0]; + n = taskData->inputs_count[1]; + size = n * m; + if (size % world.size() == 0) { + delta = size / world.size(); + } else { + delta = size / world.size() + 1; + } + input_ = std::vector(delta * world.size(), INT_MAX); + for (int i = 0; i < size; i++) { + input_[i] = reinterpret_cast(taskData->inputs[0])[i]; + } + } + + return true; +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); + } + + return true; +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + int m = 0; + int n = 0; + int size = 0; + unsigned int delta = 0; + + if (world.rank() == 0) { + m = taskData->inputs_count[0]; + n = taskData->inputs_count[1]; + size = n * m; + if (size % world.size() == 0) { + delta = size / world.size(); + } else { + delta = size / world.size() + 1; + } + } + + broadcast(world, m, 0); + broadcast(world, n, 0); + broadcast(world, delta, 0); + + local_input_ = std::vector(delta); + boost::mpi::scatter(world, input_.data(), local_input_.data(), delta, 0); + res.resize(m, INT_MAX); + unsigned int last = 0; + + if (world.rank() == world.size() - 1) { + last = local_input_.size() * world.size() - n * m; + } + unsigned int id = world.rank() * local_input_.size() / n; + + for (unsigned int i = 0; i < id; i++) { + reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); + } + + delta = std::min(local_input_.size(), n - world.rank() * local_input_.size() % n); + int l_res = *std::min_element(local_input_.begin(), local_input_.begin() + delta); + reduce(world, l_res, res[id], boost::mpi::minimum(), 0); + id++; + unsigned int k = 0; + + while (local_input_.begin() + delta + k * n < local_input_.end() - last) { + l_res = *std::min_element(local_input_.begin() + delta + k * n, + std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * n)); + reduce(world, l_res, res[id], boost::mpi::minimum(), 0); + k++; + id++; + } + + for (unsigned int i = id; i < res.size(); i++) { + reduce(world, INT_MAX, res[i], boost::mpi::minimum(), 0); + } + + return true; +} + +bool poroshin_v_find_min_val_row_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + for (size_t i = 0; i < res.size(); i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + + return true; +} \ No newline at end of file diff --git a/tasks/seq/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp b/tasks/seq/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..1f286fb3a19 --- /dev/null +++ b/tasks/seq/poroshin_v_find_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,257 @@ +// Copyright 2023 
Nesterov Alexander +#include + +#include + +#include "seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp" + +TEST(poroshin_v_find_min_val_row_matrix_seq, find_min_10x10_matrix) { + // Create data + const int n = 10; + const int m = 10; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, find_min_100x100_matrix) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, find_min_100x500_matrix) { + // Create data + const int n = 500; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, find_min_500x100_matrix) { + // Create data + const int n = 100; + const int m = 500; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), 
true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, find_min_2500x2500_matrix) { + // Create data + const int n = 2500; + const int m = 2500; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_input_empty_100x100_matrix) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_output_empty_100x100_matrix) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_less_two_1_empty_100x100_matrix) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_less_two_2_empty_100x100_matrix) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + 
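// Deliberately push only one of the two required inputs_count entries below, so validation() must fail. +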
test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_find_min_0x100_matrix) { + // Create data + const int n = 100; + const int m = 0; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m); + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, validation_fails_on_invalid_output_size) { + // Create data + const int n = 100; + const int m = 100; + + // Create TaskData + std::shared_ptr test = std::make_shared(); + poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(test); + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + test->inputs.emplace_back(reinterpret_cast(tmp.data())); + test->inputs_count.emplace_back(m); + test->inputs_count.emplace_back(n); + std::vector result(m - 1); // must be m + test->outputs.emplace_back(reinterpret_cast(result.data())); + test->outputs_count.emplace_back(m - 1); // must be m + + ASSERT_EQ(testTaskSequential.validation(), false); +} \ No newline at end of file diff --git a/tasks/seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp b/tasks/seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..a3f821d4fc3 --- /dev/null +++ b/tasks/seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace poroshin_v_find_min_val_row_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector gen(int m, int n); // generate vector (matrix) + + private: + std::vector input_{}, res{}; + // notation for TaskData + // inputs - vector (matrix) + // inputs_count[0] - m, inputs_count[1] - n + // m - num of rows, n - num of columns +}; + +} // namespace poroshin_v_find_min_val_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp b/tasks/seq/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..abb2b604fa3 --- /dev/null +++ b/tasks/seq/poroshin_v_find_min_val_row_matrix/perf_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp" + +TEST(poroshin_v_find_min_val_row_matrix_seq, test_pipeline_run) { + // Create data + const int n = 5000; + const int m = 5000; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + // Create Task + 
auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(tmp.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + std::vector result(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} + +TEST(poroshin_v_find_min_val_row_matrix_seq, test_task_run) { + // Create data + const int n = 5000; + const int m = 5000; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector tmp = poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(m, n); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(tmp.data())); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + std::vector result(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataSeq->outputs_count.emplace_back(m); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < m; i++) { + ASSERT_EQ(result[i], INT_MIN); + } +} \ No newline at end of file diff --git a/tasks/seq/poroshin_v_find_min_val_row_matrix/src/ops_seq.cpp b/tasks/seq/poroshin_v_find_min_val_row_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..7002916bb01 --- /dev/null +++ b/tasks/seq/poroshin_v_find_min_val_row_matrix/src/ops_seq.cpp @@ -0,0 +1,74 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/poroshin_v_find_min_val_row_matrix/include/ops_seq.hpp" + +#include // for INT_MAX and INT_MIN +#include + +using namespace std::chrono_literals; + +bool poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + int m = taskData->inputs_count[0]; + int n = taskData->inputs_count[1]; + int size = m * n; + + input_.resize(size); + res.resize(m); + + for (int i = 0; i < size; i++) { + input_[i] = reinterpret_cast(taskData->inputs[0])[i]; + } + + return true; +} + +bool 
poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::validation() { +
 internal_order_test(); +
 // Check count elements of output +
 return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && +
 (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && +
 (taskData->outputs_count[0] == taskData->inputs_count[0])); +
} +
 +
bool poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::run() { +
 internal_order_test(); +
 int m = taskData->inputs_count[0]; +
 int n = taskData->inputs_count[1]; +
 +
 int mn; +
 for (int i = 0; i < m; i++) { +
 mn = std::numeric_limits::max(); // Use std::numeric_limits for INT_MAX +
 for (int j = n * i; j < n * i + n; j++) { +
 mn = std::min(mn, input_[j]); +
 } +
 res[i] = mn; +
 } +
 +
 return true; +
} +
 +
bool poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::post_processing() { +
 internal_order_test(); +
 for (size_t i = 0; i < res.size(); i++) { +
 reinterpret_cast(taskData->outputs[0])[i] = res[i]; +
 } +
 return true; +
} +
 +
std::vector poroshin_v_find_min_val_row_matrix_seq::TestTaskSequential::gen(int m, int n) { +
 std::vector tmp(m * n); +
 int n1 = std::max(n, m); +
 int m1 = std::min(n, m); +
 +
 for (auto& t : tmp) { +
 t = n1 + (std::rand() % (m1 - n1 + 7)); +
 } +
 +
 for (int i = 0; i < m; i++) { +
 tmp[(std::rand() % n) + i * n] = +
 std::numeric_limits::min(); // In 1 of n columns, the value must be INT_MIN (needed to check answer) +
 } +
 +
 return tmp; +
} \ No newline at end of file
From 6ef284d8830244e273f2df7095e90d9a0d588896 Mon Sep 17 00:00:00 2001
From: sk1er52
Date: Mon, 4 Nov 2024 22:10:25 +0300
Subject: [PATCH 101/155] Anufriev Daniil. Task 1. Variant 3. Maximum value of
 vector elements. (#142)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

seq: Iterate over the elements of the vector, comparing each one against the
current maximum.

mpi: The code finds the maximum element of the vector in parallel by
distributing it across the MPI processes. scatterv hands out chunks of the
vector, each process finds its local maximum, and reduce determines the
global maximum on process 0, which is then written to the output data.
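A minimal sketch of this scatterv/reduce pattern, assuming only Boost.MPI
(<boost/mpi.hpp>, <algorithm>, <limits>, <vector>); the helper name and exact
chunking are illustrative, not taken from the patch:

 // Chunk the vector evenly, take each chunk's max, then reduce the local maxima onto rank 0.
 int parallel_max(boost::mpi::communicator &world, const std::vector<int> &data) {
   int total = static_cast<int>(data.size());  // only rank 0 holds the full vector
   boost::mpi::broadcast(world, total, 0);
   std::vector<int> counts(world.size(), total / world.size());
   std::vector<int> offsets(world.size(), 0);
   for (int i = 0; i < total % world.size(); ++i) counts[i]++;  // spread the remainder
   for (int i = 1; i < world.size(); ++i) offsets[i] = offsets[i - 1] + counts[i - 1];
   std::vector<int> local(counts[world.rank()]);
   boost::mpi::scatterv(world, data.data(), counts, offsets, local.data(), counts[world.rank()], 0);
   int local_max = std::numeric_limits<int>::min();  // identity element for max
   for (int v : local) local_max = std::max(local_max, v);
   int global_max = local_max;
   boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
   return global_max;  // meaningful on rank 0
 }

An empty vector leaves every local maximum at INT_MIN, which is also what the
emptyVector test below expects.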
--- .../func_tests/main_anufriev.cpp | 110 ++++++++++++++ .../include/ops_mpi_anufriev.hpp | 41 ++++++ .../perf_tests/main_anufriev.cpp | 73 +++++++++ .../src/ops_mpi_anufriev.cpp | 113 ++++++++++++++ .../func_tests/main_anufriev.cpp | 138 ++++++++++++++++++ .../include/ops_seq_anufriev.hpp | 23 +++ .../perf_tests/main_anufriev.cpp | 72 +++++++++ .../src/ops_seq_anufriev.cpp | 47 ++++++ 8 files changed, 617 insertions(+) create mode 100644 tasks/mpi/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp create mode 100644 tasks/mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp create mode 100644 tasks/mpi/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp create mode 100644 tasks/mpi/anufriev_d_max_of_vector_elements/src/ops_mpi_anufriev.cpp create mode 100644 tasks/seq/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp create mode 100644 tasks/seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp create mode 100644 tasks/seq/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp create mode 100644 tasks/seq/anufriev_d_max_of_vector_elements/src/ops_seq_anufriev.cpp diff --git a/tasks/mpi/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp b/tasks/mpi/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp new file mode 100644 index 00000000000..69ca9461a85 --- /dev/null +++ b/tasks/mpi/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp @@ -0,0 +1,110 @@ +#include + +#include +#include +#include +#include + +#include "mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp" + +void run_parallel_and_sequential_tasks(std::vector& input_vector, int32_t expected_max) { + boost::mpi::communicator world; + int32_t result_parallel = std::numeric_limits::min(); + int32_t result_sequential = std::numeric_limits::min(); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataPar->inputs_count.emplace_back(input_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&result_parallel)); + taskDataPar->outputs_count.emplace_back(1); + } + + anufriev_d_max_of_vector_elements_parallel::VectorMaxPar testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.validation(); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataSeq->inputs_count.emplace_back(input_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result_sequential)); + taskDataSeq->outputs_count.emplace_back(1); + + anufriev_d_max_of_vector_elements_parallel::VectorMaxSeq testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.validation(); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(result_sequential, result_parallel); + ASSERT_EQ(result_sequential, expected_max); + } +} + +TEST(anufriev_d_max_of_vector_elements_mpi, randomVector50000) { + boost::mpi::communicator world; + std::vector input_vector; + + if (world.rank() == 0) { + input_vector = anufriev_d_max_of_vector_elements_parallel::make_random_vector(50000, -500, 5000); + } + + boost::mpi::broadcast(world, input_vector, 0); + + int32_t expected_max = std::numeric_limits::min(); + if (world.rank() == 0) { + expected_max = 
*std::max_element(input_vector.begin(), input_vector.end()); + } + + run_parallel_and_sequential_tasks(input_vector, expected_max); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, regularVector) { + std::vector input_vector = {1, 2, 3, -5, 3, 43}; + run_parallel_and_sequential_tasks(input_vector, 43); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, positiveNumbers) { + std::vector input_vector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + run_parallel_and_sequential_tasks(input_vector, 10); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, negativeNumbers) { + std::vector input_vector = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}; + run_parallel_and_sequential_tasks(input_vector, -1); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, zeroVector) { + std::vector input_vector = {0, 0, 0, 0, 0}; + run_parallel_and_sequential_tasks(input_vector, 0); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, tinyVector) { + std::vector input_vector = {4, -20}; + run_parallel_and_sequential_tasks(input_vector, 4); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, emptyVector) { + std::vector input_vector = {}; + run_parallel_and_sequential_tasks(input_vector, std::numeric_limits::min()); +} + +TEST(anufriev_d_max_of_vector_elements_mpi, validationNotPassed) { + boost::mpi::communicator world; + std::vector input = {1, 2, 3, -5}; + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + } + + anufriev_d_max_of_vector_elements_parallel::VectorMaxPar vectorMaxPar(taskData); + + if (world.rank() == 0) { + ASSERT_FALSE(vectorMaxPar.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp b/tasks/mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp new file mode 100644 index 00000000000..a011bd47a02 --- /dev/null +++ b/tasks/mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace anufriev_d_max_of_vector_elements_parallel { + +[[nodiscard]] std::vector make_random_vector(int32_t size, int32_t val_min, int32_t val_max); + +class VectorMaxSeq : public ppc::core::Task { + public: + explicit VectorMaxSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int32_t max_ = std::numeric_limits::min(); +}; + +class VectorMaxPar : public ppc::core::Task { + public: + explicit VectorMaxPar(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int32_t max_ = std::numeric_limits::min(); + boost::mpi::communicator world; +}; + +} // namespace anufriev_d_max_of_vector_elements_parallel \ No newline at end of file diff --git a/tasks/mpi/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp b/tasks/mpi/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp new file mode 100644 index 00000000000..fb3d6b1b514 --- /dev/null +++ b/tasks/mpi/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp @@ -0,0 +1,73 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include 
"mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp" + +TEST(anufriev_d_max_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector input_vector; + int32_t result_parallel = std::numeric_limits::min(); + std::shared_ptr taskDataPar = std::make_shared(); + int vector_size = 50000000; + + if (world.rank() == 0) { + input_vector.resize(vector_size, 1); + input_vector[vector_size / 2] = 10; + taskDataPar->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataPar->inputs_count.emplace_back(input_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&result_parallel)); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(10, result_parallel); + } +} + +TEST(anufriev_d_max_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector input_vector; + int32_t result_parallel = std::numeric_limits::min(); + std::shared_ptr taskDataPar = std::make_shared(); + int vector_size = 50000000; + + if (world.rank() == 0) { + input_vector.resize(vector_size, 1); + input_vector[0] = -5; + taskDataPar->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskDataPar->inputs_count.emplace_back(input_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&result_parallel)); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, result_parallel); + } +} \ No newline at end of file diff --git a/tasks/mpi/anufriev_d_max_of_vector_elements/src/ops_mpi_anufriev.cpp b/tasks/mpi/anufriev_d_max_of_vector_elements/src/ops_mpi_anufriev.cpp new file mode 100644 index 00000000000..e5fa23285b4 --- /dev/null +++ b/tasks/mpi/anufriev_d_max_of_vector_elements/src/ops_mpi_anufriev.cpp @@ -0,0 +1,113 @@ +#include "mpi/anufriev_d_max_of_vector_elements/include/ops_mpi_anufriev.hpp" + +#include +#include +#include + +namespace anufriev_d_max_of_vector_elements_parallel { + +std::vector make_random_vector(int32_t size, int32_t val_min, int32_t val_max) { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> distrib(val_min, val_max); + + std::vector new_vector(size); + std::generate(new_vector.begin(), new_vector.end(), [&]() { return distrib(gen); }); + return new_vector; +} + +// Sequential Version +bool VectorMaxSeq::validation() { + internal_order_test(); + return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +} + +bool VectorMaxSeq::pre_processing() { + internal_order_test(); + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input_.resize(taskData->inputs_count[0]); + 
std::copy(input_ptr, input_ptr + taskData->inputs_count[0], input_.begin()); + return true; +} + +bool VectorMaxSeq::run() { + internal_order_test(); + if (input_.empty()) { + return true; + } + max_ = input_[0]; + for (int32_t num : input_) { + if (num > max_) { + max_ = num; + } + } + return true; +} + +bool VectorMaxSeq::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = max_; + return true; +} + +// Parallel Version +bool VectorMaxPar::validation() { + internal_order_test(); + return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +} + +bool VectorMaxPar::pre_processing() { + internal_order_test(); + max_ = std::numeric_limits::min(); + return true; +} + +bool VectorMaxPar::run() { + internal_order_test(); + + int my_rank = world.rank(); + int world_size = world.size(); + int total_size = 0; + + if (my_rank == 0) { + total_size = taskData->inputs_count[0]; + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input_.assign(input_ptr, input_ptr + total_size); + } + + boost::mpi::broadcast(world, total_size, 0); + + int local_size = total_size / world_size + (my_rank < (total_size % world_size) ? 1 : 0); + std::vector send_counts(world_size, total_size / world_size); + std::vector offsets(world_size, 0); + + for (int i = 0; i < total_size % world_size; ++i) { + send_counts[i]++; + } + for (int i = 1; i < world_size; ++i) { + offsets[i] = offsets[i - 1] + send_counts[i - 1]; + } + + local_input_.resize(send_counts[my_rank]); + boost::mpi::scatterv(world, input_.data(), send_counts, offsets, local_input_.data(), local_size, 0); + + int32_t local_max = std::numeric_limits::min(); + for (int32_t num : local_input_) { + if (num > local_max) { + local_max = num; + } + } + boost::mpi::reduce(world, local_max, max_, boost::mpi::maximum(), 0); + + return true; +} + +bool VectorMaxPar::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = max_; + } + return true; +} + +} // namespace anufriev_d_max_of_vector_elements_parallel \ No newline at end of file diff --git a/tasks/seq/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp b/tasks/seq/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp new file mode 100644 index 00000000000..179bcaaf51f --- /dev/null +++ b/tasks/seq/anufriev_d_max_of_vector_elements/func_tests/main_anufriev.cpp @@ -0,0 +1,138 @@ +#include + +#include +#include +#include + +#include "seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp" + +TEST(anufriev_d_max_of_vector_elements, regularVector) { + std::vector input = {1, 2, 3, -5, 3, 43}; + int32_t expected = 43; + int32_t actual = std::numeric_limits::min(); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(anufriev_d_max_of_vector_elements, positiveNumbers) { + std::vector input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + int32_t expected = 10; + int32_t actual = std::numeric_limits::min(); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + 
taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(anufriev_d_max_of_vector_elements, negativeNumbers) { + std::vector input = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}; + int32_t expected = -1; + int32_t actual = std::numeric_limits::min(); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(anufriev_d_max_of_vector_elements, zeroVector) { + std::vector input = {0, 0, 0, 0}; + int32_t expected = 0; + int32_t actual = std::numeric_limits::min(); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + ASSERT_EQ(expected, actual); +} + +TEST(anufriev_d_max_of_vector_elements, randomVector) { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> distrib(-1000, 1000); + + std::vector input_vector(50000); + std::generate(input_vector.begin(), input_vector.end(), [&]() { return distrib(gen); }); + + int32_t expected_max = *std::max_element(input_vector.begin(), input_vector.end()); + + int32_t actual_max = std::numeric_limits::min(); + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input_vector.size()); + taskData->inputs.emplace_back(reinterpret_cast(input_vector.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual_max)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + + ASSERT_EQ(expected_max, actual_max); +} + +TEST(anufriev_d_max_of_vector_elements, emptyVector) { + std::vector input = {}; + int32_t actual = std::numeric_limits::min(); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->outputs_count.emplace_back(1); + taskData->outputs.emplace_back(reinterpret_cast(&actual)); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_TRUE(vectorMaxSeq.validation()); + vectorMaxSeq.pre_processing(); + vectorMaxSeq.run(); + vectorMaxSeq.post_processing(); + ASSERT_EQ(std::numeric_limits::min(), actual); +} + +TEST(anufriev_d_max_of_vector_elements, validationNotPassed) { + std::vector input = {1, 2, 3, 
-5}; + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs_count.emplace_back(input.size()); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + + anufriev_d_max_of_vector_elements_seq::VectorMaxSeq vectorMaxSeq(taskData); + ASSERT_FALSE(vectorMaxSeq.validation()); +} \ No newline at end of file diff --git a/tasks/seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp b/tasks/seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp new file mode 100644 index 00000000000..84500274b1f --- /dev/null +++ b/tasks/seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace anufriev_d_max_of_vector_elements_seq { + +class VectorMaxSeq : public ppc::core::Task { + public: + explicit VectorMaxSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int32_t max_ = std::numeric_limits::min(); +}; + +} // namespace anufriev_d_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp b/tasks/seq/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp new file mode 100644 index 00000000000..e1a485cdba9 --- /dev/null +++ b/tasks/seq/anufriev_d_max_of_vector_elements/perf_tests/main_anufriev.cpp @@ -0,0 +1,72 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp" + +TEST(anufriev_d_max_of_vector_elements_seq, test_pipeline_run) { + const int32_t vec_size = 50000000; + std::vector input_data(vec_size, 1); + input_data[vec_size / 2] = 10; + int32_t expected_max = 10; + int32_t actual_max = std::numeric_limits::min(); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskDataSeq->inputs_count.emplace_back(input_data.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&actual_max)); + taskDataSeq->outputs_count.emplace_back(1); + + auto vectorMaxSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(vectorMaxSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_max, actual_max); +} + +TEST(anufriev_d_max_of_vector_elements_seq, first_negative) { + const int32_t count = 50000000; + std::vector input_data(count, 1); + input_data[0] = -5; + int32_t expected_max = 1; + int32_t actual_max = std::numeric_limits::min(); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskDataSeq->inputs_count.emplace_back(input_data.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&actual_max)); + taskDataSeq->outputs_count.emplace_back(1); + + auto vectorMaxSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); 
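// Perf harness setup: ten timed repetitions, measured by a lambda that reports seconds elapsed since t0.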
+ perfAttr->num_running = 10; +
 const auto t0 = std::chrono::high_resolution_clock::now(); +
 perfAttr->current_timer = [&] { +
 auto current_time_point = std::chrono::high_resolution_clock::now(); +
 auto duration = std::chrono::duration_cast(current_time_point - t0).count(); +
 return static_cast(duration) * 1e-9; +
 }; +
 +
 auto perfResults = std::make_shared(); +
 +
 auto perfAnalyzer = std::make_shared(vectorMaxSequential); +
 perfAnalyzer->task_run(perfAttr, perfResults); +
 ppc::core::Perf::print_perf_statistic(perfResults); +
 +
 ASSERT_EQ(expected_max, actual_max); +
}
diff --git a/tasks/seq/anufriev_d_max_of_vector_elements/src/ops_seq_anufriev.cpp b/tasks/seq/anufriev_d_max_of_vector_elements/src/ops_seq_anufriev.cpp
new file mode 100644
index 00000000000..557e2b3348b
--- /dev/null
+++ b/tasks/seq/anufriev_d_max_of_vector_elements/src/ops_seq_anufriev.cpp
@@ -0,0 +1,47 @@
+#include "seq/anufriev_d_max_of_vector_elements/include/ops_seq_anufriev.hpp" +
 +
+#include +
 +
namespace anufriev_d_max_of_vector_elements_seq { +
 +
bool VectorMaxSeq::validation() { +
 internal_order_test(); +
 +
 return !taskData->outputs.empty() && taskData->outputs_count[0] == 1; +
} +
 +
bool VectorMaxSeq::pre_processing() { +
 internal_order_test(); +
 +
 auto* input_ptr = reinterpret_cast(taskData->inputs[0]); +
 input_.resize(taskData->inputs_count[0]); +
 std::copy(input_ptr, input_ptr + taskData->inputs_count[0], input_.begin()); +
 +
 return true; +
} +
 +
bool VectorMaxSeq::run() { +
 internal_order_test(); +
 +
 if (input_.empty()) { +
 return true; +
 } +
 +
 max_ = input_[0]; +
 for (int32_t num : input_) { +
 if (num > max_) { +
 max_ = num; +
 } +
 } +
 +
 return true; +
} +
 +
bool VectorMaxSeq::post_processing() { +
 internal_order_test(); +
 +
 *reinterpret_cast(taskData->outputs[0]) = max_; +
 return true; +
} +
 +
} // namespace anufriev_d_max_of_vector_elements_seq \ No newline at end of file
From 4963d98733526d71b4ae20407f2c70b1e580d036 Mon Sep 17 00:00:00 2001
From: Ksu04 <113617253+Ksu04@users.noreply.github.com>
Date: Mon, 4 Nov 2024 22:11:40 +0300
Subject: [PATCH 102/155] Vedernikova Ksenia. Task 1. Variant 24. Counting the
 number of words in a string (#144)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Description**

- **Sequential task.** The program is a finite automaton with two states,
  space (is_space) and non-space. When the state changes from "space" to
  "non-space", the word counter is incremented.
- **Parallel task.** Word counting works the same way. The data is handed out
  with an overlap to the left so that no single word is counted twice.
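The two-state automaton fits in a few lines; a sketch under the same model
(the helper name is illustrative, not from the patch; needs <cstddef> and
<string>):

 // Count transitions from the "space" state into the "non-space" state.
 size_t count_words(const std::string &s) {
   size_t words = 0;
   bool is_space = true;  // start in the "space" state
   for (char c : s) {
     if (c != ' ' && is_space) {
       words++;  // a space -> non-space transition starts a new word
     }
     is_space = (c == ' ');
   }
   return words;
 }

For example, count_words("  Hello World ") returns 2, matching the padded
test cases below.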
--- .../func_tests/main.cpp | 98 +++++++++++++ .../include/ops_mpi.hpp | 45 ++++++ .../perf_tests/main.cpp | 56 ++++++++ .../src/ops_mpi.cpp | 129 ++++++++++++++++++ .../func_tests/main.cpp | 43 ++++++ .../include/ops_seq.hpp | 24 ++++ .../perf_tests/main.cpp | 52 +++++++ .../src/ops_seq.cpp | 46 +++++++ 8 files changed, 493 insertions(+) create mode 100644 tasks/mpi/vedernikova_k_word_num_in_str/func_tests/main.cpp create mode 100644 tasks/mpi/vedernikova_k_word_num_in_str/include/ops_mpi.hpp create mode 100644 tasks/mpi/vedernikova_k_word_num_in_str/perf_tests/main.cpp create mode 100644 tasks/mpi/vedernikova_k_word_num_in_str/src/ops_mpi.cpp create mode 100644 tasks/seq/vedernikova_k_word_num_in_str/func_tests/main.cpp create mode 100644 tasks/seq/vedernikova_k_word_num_in_str/include/ops_seq.hpp create mode 100644 tasks/seq/vedernikova_k_word_num_in_str/perf_tests/main.cpp create mode 100644 tasks/seq/vedernikova_k_word_num_in_str/src/ops_seq.cpp diff --git a/tasks/mpi/vedernikova_k_word_num_in_str/func_tests/main.cpp b/tasks/mpi/vedernikova_k_word_num_in_str/func_tests/main.cpp new file mode 100644 index 00000000000..186740ec04e --- /dev/null +++ b/tasks/mpi/vedernikova_k_word_num_in_str/func_tests/main.cpp @@ -0,0 +1,98 @@ +#include + +#include +#include +#include +#include +#include + +#include "../include/ops_mpi.hpp" + +void run_test(std::string &&in) { + boost::mpi::communicator world; + + size_t out = 0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + vedernikova_k_word_num_in_str_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + size_t ref = 0; + + std::shared_ptr taskDataSeq = std::make_shared(*taskDataPar); + taskDataSeq->outputs[0] = reinterpret_cast(&ref); + + vedernikova_k_word_num_in_str_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + EXPECT_EQ(out, ref); + } +} + +std::string make_random_sentence(size_t length) { + std::string buf(length, ' '); + + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_real_distribution<> dist(0., 1.); + + for (size_t i = 0; i < length; i++) { + if (dist(gen) > 0.6) { + buf[i] = 'a'; + } + } + + return buf; +} + +TEST(vedernikova_k_word_num_in_str_mpi, empty) { run_test(""); } + +TEST(vedernikova_k_word_num_in_str_mpi, empty_strlen_1) { run_test(std::string(1, ' ')); } +TEST(vedernikova_k_word_num_in_str_mpi, empty_strlen_2) { run_test(std::string(2, ' ')); } +TEST(vedernikova_k_word_num_in_str_mpi, empty_strlen_3) { run_test(std::string(3, ' ')); } +TEST(vedernikova_k_word_num_in_str_mpi, empty_strlen_4) { run_test(std::string(4, ' ')); } +TEST(vedernikova_k_word_num_in_str_mpi, empty_strlen_5) { run_test(std::string(5, ' ')); } + +TEST(vedernikova_k_word_num_in_str_mpi, strlen_1) { run_test("1"); } +TEST(vedernikova_k_word_num_in_str_mpi, strlen_2) { run_test("2"); } +TEST(vedernikova_k_word_num_in_str_mpi, strlen_3) { run_test("3"); } +TEST(vedernikova_k_word_num_in_str_mpi, 
strlen_4) { run_test("4"); } + +TEST(vedernikova_k_word_num_in_str_mpi, words_1) { run_test("Hello"); } +TEST(vedernikova_k_word_num_in_str_mpi, words_1_leading) { run_test(" Hello"); } +TEST(vedernikova_k_word_num_in_str_mpi, words_1_trailing) { run_test("Hello "); } +TEST(vedernikova_k_word_num_in_str_mpi, words_1_padded) { run_test(" Hello "); } + +TEST(vedernikova_k_word_num_in_str_mpi, words_2) { run_test("Hello World"); } +TEST(vedernikova_k_word_num_in_str_mpi, words_2_leading) { run_test(" Hello World"); } +TEST(vedernikova_k_word_num_in_str_mpi, words_2_trailing) { run_test("Hello World "); } +TEST(vedernikova_k_word_num_in_str_mpi, words_2_padded) { run_test(" Hello World "); } +TEST(vedernikova_k_word_num_in_str_mpi, words_2_inner) { run_test("Hello World"); } + +TEST(vedernikova_k_word_num_in_str_mpi, words_3) { run_test("1 2 3"); } + +TEST(vedernikova_k_word_num_in_str_seq, padded_corner) { run_test(" a "); } + +TEST(vedernikova_k_word_num_in_str_mpi, random_3) { run_test(make_random_sentence(3)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_4) { run_test(make_random_sentence(4)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_5) { run_test(make_random_sentence(5)); } + +TEST(vedernikova_k_word_num_in_str_mpi, random_64) { run_test(make_random_sentence(64)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_128) { run_test(make_random_sentence(128)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_256) { run_test(make_random_sentence(256)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_512) { run_test(make_random_sentence(512)); } +TEST(vedernikova_k_word_num_in_str_mpi, random_1024) { run_test(make_random_sentence(512)); } \ No newline at end of file diff --git a/tasks/mpi/vedernikova_k_word_num_in_str/include/ops_mpi.hpp b/tasks/mpi/vedernikova_k_word_num_in_str/include/ops_mpi.hpp new file mode 100644 index 00000000000..fd0b5f5b2b8 --- /dev/null +++ b/tasks/mpi/vedernikova_k_word_num_in_str/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace vedernikova_k_word_num_in_str_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + size_t res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + size_t res_{}; + + boost::mpi::communicator world; +}; + +} // namespace vedernikova_k_word_num_in_str_mpi \ No newline at end of file diff --git a/tasks/mpi/vedernikova_k_word_num_in_str/perf_tests/main.cpp b/tasks/mpi/vedernikova_k_word_num_in_str/perf_tests/main.cpp new file mode 100644 index 00000000000..15d39a93317 --- /dev/null +++ b/tasks/mpi/vedernikova_k_word_num_in_str/perf_tests/main.cpp @@ -0,0 +1,56 @@ +#include + +#include +#include +#include + +#include "../include/ops_mpi.hpp" +#include "core/perf/include/perf.hpp" + +void run_test(std::string &&in, size_t solution, + const std::function, + const std::shared_ptr)> &executor) { + boost::mpi::communicator world; + + size_t out = 0; + + 
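// Only rank 0 fills the TaskData buffers; the other ranks receive their chunk inside run(). +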
std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + ppc::core::Perf perfAnalyzer(testMpiTaskParallel); + executor(perfAnalyzer, perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_EQ(out, solution); + } +} +void run_test(const std::function, + const std::shared_ptr)> &executor) { + run_test("Sentence for word counter test", 5, executor); +} + +TEST(vedernikova_k_word_num_in_str_mpi_perf_test, test_pipeline_run) { + run_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST(vedernikova_k_word_num_in_str_mpi_perf_test, test_task_run) { + run_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/mpi/vedernikova_k_word_num_in_str/src/ops_mpi.cpp b/tasks/mpi/vedernikova_k_word_num_in_str/src/ops_mpi.cpp new file mode 100644 index 00000000000..1cf7b24085b --- /dev/null +++ b/tasks/mpi/vedernikova_k_word_num_in_str/src/ops_mpi.cpp @@ -0,0 +1,129 @@ +#include "../include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == 1; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + + res_ = 0; + + return true; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + bool is_space = input_[0] == ' '; + for (const char c : input_) { + if (c == ' ') { + if (!is_space) { + res_++; + } + is_space = true; + continue; + } + is_space = false; + } + res_ += (is_space || input_.empty()) ? 
0 : 1; + + return true; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + *reinterpret_cast(taskData->outputs[0]) = res_; + + return true; +} + +// + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + return world.rank() != 0 || taskData->outputs_count[0] == 1; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + } + res_ = 0; + + return true; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + size_t load{}; + if (world.rank() == 0) { + load = input_.size() / world.size(); + } + boost::mpi::broadcast(world, load, 0); + const size_t underlap = 1; + + std::string local_input; + if (world.rank() == 0) { + const size_t extra_load = input_.size() % world.size(); + + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * load + extra_load - underlap, load + underlap); + } + + local_input.assign(input_, 0, load + extra_load); + } else { + local_input.resize(load + underlap); + world.recv(0, 0, local_input.data(), load + underlap); + } + + size_t local_res = 0; + auto it = local_input.begin(); + if (world.rank() != 0 && it != local_input.end()) { + bool skip_is_space = *it == ' '; + for (; it != local_input.end() && (skip_is_space == (*it == ' ')); ++it); + } + const bool ended = it == local_input.end(); + bool is_space = !ended && *it == ' '; + + for (; it != local_input.end(); ++it) { + if (*it == ' ') { + if (!is_space) { + local_res++; + } + is_space = true; + continue; + } + is_space = false; + } + local_res += (ended || is_space || local_input.empty()) ? 
0 : 1; + + boost::mpi::reduce(world, local_res, res_, std::plus(), 0); + + return true; +} + +bool vedernikova_k_word_num_in_str_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = res_; + } + return true; +} diff --git a/tasks/seq/vedernikova_k_word_num_in_str/func_tests/main.cpp b/tasks/seq/vedernikova_k_word_num_in_str/func_tests/main.cpp new file mode 100644 index 00000000000..ff11cc08121 --- /dev/null +++ b/tasks/seq/vedernikova_k_word_num_in_str/func_tests/main.cpp @@ -0,0 +1,43 @@ +#include + +#include + +#include "../include/ops_seq.hpp" + +void run_test(std::string &&in, size_t solution) { + size_t out = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + vedernikova_k_word_num_in_str_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + EXPECT_EQ(solution, out); +} + +TEST(vedernikova_k_word_num_in_str_seq, empty) { run_test("", 0); } +TEST(vedernikova_k_word_num_in_str_seq, empty_strlen_1) { run_test(std::string(1, ' '), 0); } +TEST(vedernikova_k_word_num_in_str_seq, empty_strlen_2) { run_test(std::string(2, ' '), 0); } +TEST(vedernikova_k_word_num_in_str_seq, empty_strlen_3) { run_test(std::string(3, ' '), 0); } +TEST(vedernikova_k_word_num_in_str_seq, empty_strlen_4) { run_test(std::string(4, ' '), 0); } +TEST(vedernikova_k_word_num_in_str_seq, empty_strlen_5) { run_test(std::string(5, ' '), 0); } + +TEST(vedernikova_k_word_num_in_str_seq, words_1) { run_test("Hello", 1); } +TEST(vedernikova_k_word_num_in_str_seq, words_1_leading) { run_test(" Hello", 1); } +TEST(vedernikova_k_word_num_in_str_seq, words_1_trailing) { run_test("Hello ", 1); } +TEST(vedernikova_k_word_num_in_str_seq, words_1_padded) { run_test(" Hello ", 1); } + +TEST(vedernikova_k_word_num_in_str_seq, words_2) { run_test("Hello World", 2); } +TEST(vedernikova_k_word_num_in_str_seq, words_2_leading) { run_test(" Hello World", 2); } +TEST(vedernikova_k_word_num_in_str_seq, words_2_trailing) { run_test("Hello World ", 2); } +TEST(vedernikova_k_word_num_in_str_seq, words_2_padded) { run_test(" Hello World ", 2); } +TEST(vedernikova_k_word_num_in_str_seq, words_2_inner) { run_test("Hello World", 2); } + +TEST(vedernikova_k_word_num_in_str_seq, words_3) { run_test("1 2 3", 3); } diff --git a/tasks/seq/vedernikova_k_word_num_in_str/include/ops_seq.hpp b/tasks/seq/vedernikova_k_word_num_in_str/include/ops_seq.hpp new file mode 100644 index 00000000000..84004356364 --- /dev/null +++ b/tasks/seq/vedernikova_k_word_num_in_str/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace vedernikova_k_word_num_in_str_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + size_t res_{}; +}; + +} // namespace vedernikova_k_word_num_in_str_seq \ No newline at end of file diff --git 
a/tasks/seq/vedernikova_k_word_num_in_str/perf_tests/main.cpp b/tasks/seq/vedernikova_k_word_num_in_str/perf_tests/main.cpp new file mode 100644 index 00000000000..d8db206e183 --- /dev/null +++ b/tasks/seq/vedernikova_k_word_num_in_str/perf_tests/main.cpp @@ -0,0 +1,52 @@ +#include + +#include + +#include "../include/ops_seq.hpp" +#include "core/perf/include/perf.hpp" + +void run_test(std::string &&in, size_t solution, + const std::function, + const std::shared_ptr)> &executor) { + size_t out = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + ppc::core::Perf perfAnalyzer(testTaskSequential); + executor(perfAnalyzer, perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_EQ(out, solution); +} +void run_test(const std::function, + const std::shared_ptr)> &executor) { + run_test("Sentence for word counter test", 5, executor); +} + +TEST(vedernikova_k_word_num_in_str_mpi_perf_test, test_pipeline_run) { + run_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST(vedernikova_k_word_num_in_str_mpi_perf_test, test_task_run) { + run_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/seq/vedernikova_k_word_num_in_str/src/ops_seq.cpp b/tasks/seq/vedernikova_k_word_num_in_str/src/ops_seq.cpp new file mode 100644 index 00000000000..822eede2d43 --- /dev/null +++ b/tasks/seq/vedernikova_k_word_num_in_str/src/ops_seq.cpp @@ -0,0 +1,46 @@ +#include "../include/ops_seq.hpp" + +#include + +bool vedernikova_k_word_num_in_str_seq::TestTaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == 1; +} + +bool vedernikova_k_word_num_in_str_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + + res_ = 0; + + return true; +} + +bool vedernikova_k_word_num_in_str_seq::TestTaskSequential::run() { + internal_order_test(); + + bool is_space = input_[0] == ' '; + for (const char c : input_) { + if (c == ' ') { + if (!is_space) { + res_++; + } + is_space = true; + continue; + } + is_space = false; + } + res_ += (is_space || input_.empty()) ? 
0 : 1;
+
+  return true;
+}
+
+bool vedernikova_k_word_num_in_str_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  *reinterpret_cast(taskData->outputs[0]) = res_;
+
+  return true;
+}
From 083c551851134a10cd3300a8b878ab3baa7e65ef Mon Sep 17 00:00:00 2001
From: Egor1dzeN <113616439+Egor1dzeN@users.noreply.github.com>
Date: Tue, 5 Nov 2024 00:41:20 +0300
Subject: [PATCH 103/155] Morozov Egor. Task 1. Variant 17. Finding the minimum values in matrix rows (#67)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**_Sequential task description_**

Create a vector whose size equals the number of matrix rows. Walk through the rows of the matrix, compute the minimum value in each row, and put it into the answer vector.

**_Parallel task description_**

Split the matrix into several parts, each with an (almost) equal number of rows: the per-part row counts differ by at most 1. Compute the per-row minimum values in each part. Gather all the partial answers into one common answer vector.
---
 .../func_tests/main.cpp | 217 ++++++++++++++++++
 .../include/ops_mpi.hpp |  42 ++++
 .../perf_tests/main.cpp |  90 ++++++++
 .../src/ops_mpi.cpp     | 139 +++++++++++
 .../func_tests/main.cpp | 160 +++++++++++++
 .../include/ops_seq.hpp |  23 ++
 .../perf_tests/main.cpp |  91 ++++++++
 .../src/ops_seq.cpp     |  48 ++++
 8 files changed, 810 insertions(+)
 create mode 100644 tasks/mpi/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/morozov_e_min_val_in_rows_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/morozov_e_min_val_in_rows_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp b/tasks/mpi/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..edd23489809
--- /dev/null
+++ b/tasks/mpi/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp
@@ -0,0 +1,217 @@
+#include
+
+#include
+#include
+#include
+
+#include "mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp"
+std::vector> getRandomMatrix_(int n, int m) {
+  int left = 0;
+  int right = 10005;
+
+  // Create the matrix
+  std::vector> matrix(n, std::vector(m));
+
+  // Fill the matrix with random values
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < m; ++j) {
+      matrix[i][j] = left + std::rand() % (right - left + 1);
+    }
+  }
+  for (int i = 0; i < n; ++i) {
+    int m_ = std::rand() % m;
+    matrix[i][m_] = -1;
+  }
+  return matrix;
+}
+TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Validation_isFalse0) {
+  boost::mpi::communicator world;
+  std::shared_ptr taskDataSeq = std::make_shared();
+  std::vector> matrix = {{1, 1}, {2, 2}};
+  if (world.rank() ==
0) { + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataSeq); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskParallel.validation()); + ASSERT_FALSE(testMpiTaskSequential.validation()); + } +} +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Validation_isFalse1) { + boost::mpi::communicator world; + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + if (world.rank() == 0) { + for (size_t i = 0; i < matrix.size(); ++i) + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataSeq); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskParallel.validation()); + ASSERT_FALSE(testMpiTaskSequential.validation()); + } +} +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Validation_isFalse2) { + boost::mpi::communicator world; + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + if (world.rank() == 0) { + for (size_t i = 0; i < matrix.size(); ++i) taskDataSeq->inputs_count.emplace_back(1); + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataSeq); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskParallel.validation()); + ASSERT_FALSE(testMpiTaskSequential.validation()); + } +} +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Validation_isFalse3) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + std::vector res(2, 0); + if (world.rank() == 0) { + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + taskDataPar->inputs_count.emplace_back(2); + } + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(3); + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + ASSERT_FALSE(testMpiTaskSequential.validation()); + } +} + +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Validation_isTrue) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + std::vector res = {1, 2}; + + if (world.rank() == 0) { + for (size_t i = 0; i < matrix.size(); ++i) + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs_count.emplace_back(matrix[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + ASSERT_TRUE(testMpiTaskSequential.validation()); + } +} + +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Main1) { + std::vector> matrixPar; + std::vector> matrixSeq; + const int n = 3; + const int m = 3; + std::vector resPar(n); + std::vector resSeq(n); + boost::mpi::communicator world; + std::shared_ptr taskDataSeq = std::make_shared(); + 
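+  // The sequential baseline below is built and run on every rank (there is no rank guard); only the parallel task restricts its buffers to rank 0.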
std::shared_ptr taskDataPar = std::make_shared(); + matrixSeq = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + for (size_t i = 0; i < matrixSeq.size(); ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrixSeq[i].data())); + } + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(resSeq.size()); + morozov_e_min_val_in_rows_matrix::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + if (world.rank() == 0) { + matrixPar = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + for (size_t i = 0; i < matrixPar.size(); ++i) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrixPar[i].data())); + } + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(resPar.data())); + taskDataPar->outputs_count.emplace_back(resPar.size()); + } + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + ASSERT_EQ(resSeq[0], 1); + ASSERT_EQ(resSeq[1], 4); + ASSERT_EQ(resSeq[2], 7); + if (world.rank() == 0) { + ASSERT_EQ(resPar[0], 1); + ASSERT_EQ(resPar[1], 4); + ASSERT_EQ(resPar[2], 7); + } +} +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Main2) { + std::vector> matrix; + const int n = 1000; + const int m = 1000; + std::vector resPar(n); + std::vector res(n); + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + matrix = getRandomMatrix_(n, m); + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(resPar.data())); + taskDataPar->outputs_count.emplace_back(resPar.size()); + } + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + for (int i = 0; i < n; ++i) { + ASSERT_EQ(resPar[i], -1); + } + } +} +TEST(morozov_e_min_val_in_rows_matrix_MPI, Test_Main3) { + std::vector> matrix; + const int n = 1500; + const int m = 1500; + std::vector resPar(n); + std::vector res(n); + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + matrix = getRandomMatrix_(n, m); + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(resPar.data())); + taskDataPar->outputs_count.emplace_back(resPar.size()); + } + morozov_e_min_val_in_rows_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if 
(world.rank() == 0) {
+    for (int i = 0; i < n; ++i) {
+      ASSERT_EQ(resPar[i], -1);
+    }
+  }
+}
diff --git a/tasks/mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp
new file mode 100644
index 00000000000..b36cd1c45d3
--- /dev/null
+++ b/tasks/mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp
@@ -0,0 +1,42 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+
+#include
+
+#include
+#include
+#include
+
+#include "core/task/include/task.hpp"
+
+namespace morozov_e_min_val_in_rows_matrix {
+
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector> matrix_;
+  std::vector min_val_list_;
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector> matrix_;
+  std::vector min_val_list_;
+  int n, m;
+  boost::mpi::communicator world;
+};
+
+}  // namespace morozov_e_min_val_in_rows_matrix
\ No newline at end of file
diff --git a/tasks/mpi/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp b/tasks/mpi/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp
new file mode 100644
index 00000000000..b56ab28763b
--- /dev/null
+++ b/tasks/mpi/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp
@@ -0,0 +1,90 @@
+#include
+
+#include
+#include
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp"
+std::vector> getRandomMatrix_(int n, int m) {
+  int left = 0;
+  int right = 10005;
+
+  // Create the matrix
+  std::vector> matrix(n, std::vector(m));
+
+  // Fill the matrix with random values
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < m; ++j) {
+      matrix[i][j] = left + std::rand() % (right - left + 1);
+    }
+  }
+  for (int i = 0; i < n; ++i) {
+    int m_ = std::rand() % m;
+    matrix[i][m_] = -1;
+  }
+  return matrix;
+}
+TEST(morozov_e_min_val_in_rows_matrix_perf_test, test_pipeline_run_my) {
+  boost::mpi::communicator world;
+  const int n = 5000;
+  const int m = 5000;
+  std::vector> matrix(n, std::vector(m));
+  std::vector res(n);
+  std::shared_ptr taskDataPar = std::make_shared();
+  if (world.rank() == 0) {
+    matrix = getRandomMatrix_(n, m);
+    for (int i = 0; i < n; ++i) taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(n);
+    taskDataPar->inputs_count.emplace_back(m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast(res.data()));
+    taskDataPar->outputs_count.emplace_back(n);
+  }
+  auto testMpiTaskParallel = std::make_shared(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
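+    // Only the root prints the timing statistics and verifies the results: the output buffer was registered on rank 0 alone.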
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    for (int i = 0; i < n; ++i) {
+      ASSERT_EQ(-1, res[i]);
+    }
+  }
+}
+TEST(morozov_e_min_val_in_rows_matrix_perf_test, test_task_run_my) {
+  boost::mpi::communicator world;
+  const int n = 450;
+  const int m = 450;
+  std::vector> matrix(n, std::vector(m));
+  std::vector res(n);
+  std::shared_ptr taskDataPar = std::make_shared();
+  if (world.rank() == 0) {
+    matrix = getRandomMatrix_(n, m);
+    for (int i = 0; i < n; ++i) taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(n);
+    taskDataPar->inputs_count.emplace_back(m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast(res.data()));
+    taskDataPar->outputs_count.emplace_back(n);
+  }
+  auto testMpiTaskParallel = std::make_shared(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  if (world.rank() == 0) {
+    for (int i = 0; i < n; ++i) {
+      ASSERT_EQ(-1, res[i]);
+    }
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/morozov_e_min_val_in_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/morozov_e_min_val_in_rows_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..9aed056cbc4
--- /dev/null
+++ b/tasks/mpi/morozov_e_min_val_in_rows_matrix/src/ops_mpi.cpp
@@ -0,0 +1,139 @@
+#include "mpi/morozov_e_min_val_in_rows_matrix/include/ops_mpi.hpp"
+
+#include
+#include
+
+using namespace std::chrono_literals;
+
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  int n = taskData->inputs_count[0];
+  int m = taskData->inputs_count[1];
+  matrix_ = std::vector>(n, std::vector(m));
+  min_val_list_ = std::vector(n);
+  for (int i = 0; i < n; ++i) {
+    int* tmp_ptr = reinterpret_cast(taskData->inputs[i]);
+    for (int j = 0; j < m; ++j) {
+      matrix_[i][j] = tmp_ptr[j];
+    }
+  }
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskSequential::validation() {
+  internal_order_test();
+  if (taskData->inputs_count.size() != 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0)
+    return false;
+  if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[0]) return false;
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < matrix_.size(); ++i) {
+    int cur_min = matrix_[i][0];
+    for (size_t j = 0; j < matrix_[i].size(); ++j) {
+      cur_min = std::min(cur_min, matrix_[i][j]);
+    }
+    min_val_list_[i] = cur_min;
+  }
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  int* outputs = reinterpret_cast(taskData->outputs[0]);
+  for (size_t i = 0; i < min_val_list_.size(); i++) {
+    outputs[i] = min_val_list_[i];
+  }
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    n = taskData->inputs_count[0];
+    m = taskData->inputs_count[1];
+
+    matrix_.resize(n, std::vector(m));
+    for (int i = 0; i < n; i++) {
+      int* input_matrix = reinterpret_cast(taskData->inputs[i]);
+      matrix_[i].assign(input_matrix, input_matrix + m);
+    }
+  }
+
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if (taskData->inputs_count.size() != 2 || taskData->inputs_count[1] <= 0 ||
taskData->inputs_count[0] <= 0) + return false; + if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[0]) return false; + } + return true; +} +bool morozov_e_min_val_in_rows_matrix::TestMPITaskParallel::run() { + internal_order_test(); + + broadcast(world, n, 0); + broadcast(world, m, 0); + + int delta = n / world.size(); + int mod = n % world.size(); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + int begin = proc * delta + std::min(proc, mod); + int count = delta + (proc < mod ? 1 : 0); + for (int r = begin; r < begin + count; r++) { + world.send(proc, 0, matrix_[r].data(), m); + } + } + } + + int cur_n = delta + (world.rank() < mod ? 1 : 0); + std::vector> local_matrix(cur_n, std::vector(m)); + + if (world.rank() == 0) { + std::copy(matrix_.begin(), matrix_.begin() + cur_n, local_matrix.begin()); + } else { + for (int r = 0; r < cur_n; r++) { + world.recv(0, 0, local_matrix[r].data(), m); + } + } + + min_val_list_.resize(n); + + std::vector cur_min_vector(local_matrix.size(), INT_MAX); + for (size_t i = 0; i < local_matrix.size(); i++) { + for (const auto& val : local_matrix[i]) { + cur_min_vector[i] = std::min(cur_min_vector[i], val); + } + } + + if (world.rank() == 0) { + int i_cur = 0; + std::copy(cur_min_vector.begin(), cur_min_vector.end(), min_val_list_.begin()); + i_cur += cur_min_vector.size(); + for (int proc = 1; proc < world.size(); proc++) { + int loc_size; + world.recv(proc, 0, &loc_size, 1); + std::vector loc_res_(loc_size); + world.recv(proc, 0, loc_res_.data(), loc_size); + copy(loc_res_.begin(), loc_res_.end(), min_val_list_.data() + i_cur); + i_cur += loc_res_.size(); + } + } else { + int cur_count = (int)cur_min_vector.size(); + world.send(0, 0, &cur_count, 1); + world.send(0, 0, cur_min_vector.data(), cur_count); + } + return true; +} + +bool morozov_e_min_val_in_rows_matrix::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + auto* res = reinterpret_cast(taskData->outputs[0]); + std::copy(min_val_list_.begin(), min_val_list_.end(), res); + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp b/tasks/seq/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..1e9fcbda403 --- /dev/null +++ b/tasks/seq/morozov_e_min_val_in_rows_matrix/func_tests/main.cpp @@ -0,0 +1,160 @@ +#include + +#include + +#include "seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp" +std::vector> getRandomMatrix_(int n, int m) { + int left = 0; + int right = 10005; + + std::vector> matrix(n, std::vector(m)); + + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + matrix[i][j] = left + std::rand() % (right - left + 1); + } + } + for (int i = 0; i < n; ++i) { + int m_ = std::rand() % m; + matrix[i][m_] = -1; + } + return matrix; +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Validation_False0) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskSequential.validation()); +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Validation_False1) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + for (size_t i = 0; i < matrix.size(); ++i) taskDataSeq->inputs_count.emplace_back(1); + morozov_e_min_val_in_rows_matrix::TestTaskSequential 
testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskSequential.validation()); +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Validation_False2) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + for (size_t i = 0; i < matrix.size(); ++i) + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_FALSE(testMpiTaskSequential.validation()); +} + +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Validation_True) { + std::shared_ptr taskDataSeq = std::make_shared(); + std::vector> matrix = {{1, 1}, {2, 2}}; + std::vector res = {1, 2}; + for (size_t i = 0; i < matrix.size(); ++i) + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(matrix[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_TRUE(testMpiTaskSequential.validation()); +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Main0) { + std::vector> matrix; + const int n = 2; + const int m = 2; + std::vector resSeq(n); + std::vector res(n); + + std::shared_ptr taskDataSeq = std::make_shared(); + matrix = {{1, 2}, {3, 4}}; + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(resSeq.size()); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(resSeq[0], 1); + ASSERT_EQ(resSeq[1], 3); +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Main1) { + std::vector> matrix; + const int n = 10; + const int m = 10; + std::vector resSeq(n); + std::vector res(n); + + std::shared_ptr taskDataSeq = std::make_shared(); + matrix = getRandomMatrix_(n, m); + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(resSeq.size()); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < n; ++i) { + ASSERT_EQ(resSeq[i], -1); + } +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Main2) { + std::vector> matrix; + const int n = 1000; + const int m = 1000; + std::vector resSeq(n); + std::vector res(n); + + std::shared_ptr taskDataSeq = std::make_shared(); + matrix = getRandomMatrix_(n, m); + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataSeq->inputs_count.emplace_back(n); + 
taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(resSeq.size()); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < n; ++i) { + ASSERT_EQ(resSeq[i], -1); + } +} +TEST(morozov_e_min_val_in_rows_matrix_Sequential, Test_Main3) { + std::vector> matrix; + const int n = 5000; + const int m = 5000; + std::vector resSeq(n); + std::vector res(n); + + std::shared_ptr taskDataSeq = std::make_shared(); + matrix = getRandomMatrix_(n, m); + for (size_t i = 0; i < matrix.size(); ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + } + + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(resSeq.size()); + morozov_e_min_val_in_rows_matrix::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < n; ++i) { + ASSERT_EQ(resSeq[i], -1); + } +} \ No newline at end of file diff --git a/tasks/seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp b/tasks/seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..cf56021c5f0 --- /dev/null +++ b/tasks/seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace morozov_e_min_val_in_rows_matrix { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> matrix_; + std::vector min_val_list_; +}; + +} // namespace morozov_e_min_val_in_rows_matrix \ No newline at end of file diff --git a/tasks/seq/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp b/tasks/seq/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..11a0c6a31fc --- /dev/null +++ b/tasks/seq/morozov_e_min_val_in_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp" +std::vector> getRandomMatrix_(int n, int m) { + int left = 0; + int right = 10005; + + std::vector> matrix(n, std::vector(m)); + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + matrix[i][j] = left + std::rand() % (right - left + 1); + } + } + for (int i = 0; i < n; ++i) { + int m_ = std::rand() % m; + matrix[i][m_] = -1; + } + return matrix; +} +TEST(sequential_example_perf_test, test_pipeline_run_my) { + const int n = 5000; + const int m = 5000; + std::vector> matrix(n, std::vector(m)); + std::vector res(n); + std::shared_ptr taskDataPar = std::make_shared(); + matrix = getRandomMatrix_(n, m); + for (int i = 0; i < n; ++i) taskDataPar->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + 
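+  // Convention used throughout these tasks: inputs holds one pointer per matrix row, and inputs_count stores {row count, column count}.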
taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(n); + auto testTaskSeq = std::make_shared(taskDataPar); + ASSERT_EQ(testTaskSeq->validation(), true); + testTaskSeq->pre_processing(); + testTaskSeq->run(); + testTaskSeq->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSeq); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (int i = 0; i < n; ++i) { + ASSERT_EQ(-1, res[i]); + } +} + +TEST(sequential_example_perf_test, test_task_run_my) { + const int n = 5000; + const int m = 5000; + std::vector> matrix(n, std::vector(m)); + std::vector res(n); + matrix = getRandomMatrix_(n, m); + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < n; ++i) taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(n); + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (int i = 0; i < n; ++i) { + ASSERT_EQ(-1, res[i]); + } +} diff --git a/tasks/seq/morozov_e_min_val_in_rows_matrix/src/ops_seq.cpp b/tasks/seq/morozov_e_min_val_in_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..afdf25d5d1a --- /dev/null +++ b/tasks/seq/morozov_e_min_val_in_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,48 @@ +#include "seq/morozov_e_min_val_in_rows_matrix/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; +bool morozov_e_min_val_in_rows_matrix::TestTaskSequential::pre_processing() { + internal_order_test(); + int n = taskData->inputs_count[0]; + int m = taskData->inputs_count[1]; + matrix_ = std::vector>(n, std::vector(m)); + min_val_list_ = std::vector(n); + for (int i = 0; i < n; ++i) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < m; ++j) { + matrix_[i][j] = input_matrix[j]; + } + } + return true; +} +bool morozov_e_min_val_in_rows_matrix::TestTaskSequential::validation() { + internal_order_test(); + if (taskData->inputs_count.size() != 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[0] <= 0) + return false; + if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[0]) return false; + return true; +} +bool morozov_e_min_val_in_rows_matrix::TestTaskSequential::run() { + internal_order_test(); + int n = taskData->inputs_count[0]; + int m = taskData->inputs_count[1]; 
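+  // Scan each row and record its minimum in min_val_list_.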
+  for (int i = 0; i < n; ++i) {
+    int cur_min = matrix_[i][0];
+    for (int j = 0; j < m; ++j) {
+      cur_min = std::min(cur_min, matrix_[i][j]);
+    }
+    min_val_list_[i] = cur_min;
+  }
+  return true;
+}
+bool morozov_e_min_val_in_rows_matrix::TestTaskSequential::post_processing() {
+  internal_order_test();
+  int* outputs = reinterpret_cast(taskData->outputs[0]);
+  for (size_t i = 0; i < min_val_list_.size(); i++) {
+    outputs[i] = min_val_list_[i];
+  }
+  return true;
+}
\ No newline at end of file
From 534788ae4f907e757bff40d862cb218874ed37ba Mon Sep 17 00:00:00 2001
From: KatyaKozlova <113104616+KatyaKozlova@users.noreply.github.com>
Date: Tue, 5 Nov 2024 00:42:18 +0300
Subject: [PATCH 104/155] Kozlova Ekaterina. Task 1. Variant 26. Checking the lexicographic ordering of two strings. (#101)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Sequential task description:
The sequential version of the task determines whether two strings are each ordered lexicographically. Every string is checked character by character: if at any point the current character is greater than the next one, the string is considered lexicographically unordered. The result is a vector of two numbers, the outcomes of the ordering check for the first and the second string respectively: 1 is written if the string is ordered, 0 if it is not.

### Parallel task description:
The parallel version performs the same task but uses MPI to distribute the computation across several processes. The input strings are made available to all processes: the root process receives them and sends them to the other processes via broadcast. The strings are split into substrings according to the number of processes, each process receives its share of the characters, and the lexicographic ordering of each substring is checked. The root process then analyzes the collected results and decides whether the strings as a whole are ordered.
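A minimal illustrative sketch of the per-string check described above (this is not the exact helper from the patch, which lower-cases copies of both strings and returns the two flags in one vector; it is just the same rule in isolation):

#include <cctype>
#include <string>

// A string counts as ordered when every adjacent pair of characters is
// non-decreasing under case-insensitive comparison; empty and one-character
// strings pass trivially.
static bool is_lexicographically_ordered(const std::string &s) {
  for (size_t i = 0; i + 1 < s.size(); ++i) {
    if (std::tolower(static_cast<unsigned char>(s[i])) >
        std::tolower(static_cast<unsigned char>(s[i + 1]))) {
      return false;
    }
  }
  return true;
}

For example, "aBcdef" is ordered (case is ignored) while "cDefga" is not ('g' > 'a'), which matches the {1, 0} expectation in the mixed-strings test below.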
--------- Co-authored-by: KatyaKozlova --- .../kozlova_e_lexic_order/func_tests/main.cpp | 202 ++++++++++++++++++ .../kozlova_e_lexic_order/include/ops_mpi.hpp | 48 +++++ .../kozlova_e_lexic_order/perf_tests/main.cpp | 119 +++++++++++ .../mpi/kozlova_e_lexic_order/src/ops_mpi.cpp | 159 ++++++++++++++ .../kozlova_e_lexic_order/func_tests/main.cpp | 126 +++++++++++ .../kozlova_e_lexic_order/include/ops_seq.hpp | 26 +++ .../kozlova_e_lexic_order/perf_tests/main.cpp | 118 ++++++++++ .../seq/kozlova_e_lexic_order/src/ops_seq.cpp | 61 ++++++ 8 files changed, 859 insertions(+) create mode 100644 tasks/mpi/kozlova_e_lexic_order/func_tests/main.cpp create mode 100644 tasks/mpi/kozlova_e_lexic_order/include/ops_mpi.hpp create mode 100644 tasks/mpi/kozlova_e_lexic_order/perf_tests/main.cpp create mode 100644 tasks/mpi/kozlova_e_lexic_order/src/ops_mpi.cpp create mode 100644 tasks/seq/kozlova_e_lexic_order/func_tests/main.cpp create mode 100644 tasks/seq/kozlova_e_lexic_order/include/ops_seq.hpp create mode 100644 tasks/seq/kozlova_e_lexic_order/perf_tests/main.cpp create mode 100644 tasks/seq/kozlova_e_lexic_order/src/ops_seq.cpp diff --git a/tasks/mpi/kozlova_e_lexic_order/func_tests/main.cpp b/tasks/mpi/kozlova_e_lexic_order/func_tests/main.cpp new file mode 100644 index 00000000000..83eb470a4b2 --- /dev/null +++ b/tasks/mpi/kozlova_e_lexic_order/func_tests/main.cpp @@ -0,0 +1,202 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/kozlova_e_lexic_order/include/ops_mpi.hpp" + +TEST(kozlova_e_lexic_order, Test_mixed_strings) { + boost::mpi::communicator world; + std::vector input_strings = {"aBcdef", "cDefga"}; + std::vector resMPI(2, 0); + std::vector answer = {1, 0}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (const auto &str : input_strings) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataPar->inputs_count.emplace_back(2); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(resMPI.size()); + } + + kozlova_e_lexic_order_mpi::StringComparatorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector resSeq(2, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (const auto &str : input_strings) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + kozlova_e_lexic_order_mpi::StringComparatorSeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, answer); + } +} +TEST(kozlova_e_lexic_order, Test_Ordered_Strings) { + boost::mpi::communicator world; + std::vector input_strings = {"abcde", "abcdef"}; + std::vector resMPI(2, 0); + std::vector answer(2, 1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (const auto &str : input_strings) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataPar->inputs_count.emplace_back(2); + 
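+    // inputs_count[0] is the number of strings; the output buffer receives one 0/1 ordering flag per string.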
taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(resMPI.size()); + } + + kozlova_e_lexic_order_mpi::StringComparatorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector resSeq(2, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (const auto &str : input_strings) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + kozlova_e_lexic_order_mpi::StringComparatorSeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(kozlova_e_lexic_order, Test_Unordered_Strings) { + boost::mpi::communicator world; + std::vector input_strings = {"cba", "fedcba"}; + std::vector resMPI(2, 0); + std::vector answer(2, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (const auto &str : input_strings) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataPar->inputs_count.emplace_back(2); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(resMPI.size()); + } + + kozlova_e_lexic_order_mpi::StringComparatorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector resSeq(2, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (const auto &str : input_strings) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + kozlova_e_lexic_order_mpi::StringComparatorSeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} + +TEST(kozlova_e_lexic_order, Test_Boundary_Case) { + boost::mpi::communicator world; + std::vector input_strings = {"a", "b"}; + std::vector resMPI(2, 0); + std::vector answer(2, 1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (const auto &str : input_strings) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataPar->inputs_count.emplace_back(2); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(resMPI.size()); + } + + kozlova_e_lexic_order_mpi::StringComparatorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(resMPI, answer); + } +} + +TEST(kozlova_e_lexic_order, 
Test_empty_strings) { + boost::mpi::communicator world; + std::vector input_strings = {"", ""}; + std::vector resMPI(2, 0); + std::vector answer(2, 1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (const auto &str : input_strings) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataPar->inputs_count.emplace_back(2); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(resMPI.size()); + } + + kozlova_e_lexic_order_mpi::StringComparatorMPI testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector resSeq(2, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (const auto &str : input_strings) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str.c_str()))); + } + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(resSeq.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + kozlova_e_lexic_order_mpi::StringComparatorSeq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(resMPI, resSeq); + } +} diff --git a/tasks/mpi/kozlova_e_lexic_order/include/ops_mpi.hpp b/tasks/mpi/kozlova_e_lexic_order/include/ops_mpi.hpp new file mode 100644 index 00000000000..60011f278d9 --- /dev/null +++ b/tasks/mpi/kozlova_e_lexic_order/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kozlova_e_lexic_order_mpi { + +std::vector LexicographicallyOrdered(const std::string& str1, const std::string& str2); + +class StringComparatorSeq : public ppc::core::Task { + public: + explicit StringComparatorSeq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string str1{}; + std::string str2{}; + std::vector res{}; +}; + +class StringComparatorMPI : public ppc::core::Task { + public: + explicit StringComparatorMPI(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_strings; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace kozlova_e_lexic_order_mpi \ No newline at end of file diff --git a/tasks/mpi/kozlova_e_lexic_order/perf_tests/main.cpp b/tasks/mpi/kozlova_e_lexic_order/perf_tests/main.cpp new file mode 100644 index 00000000000..4a27705ff8c --- /dev/null +++ b/tasks/mpi/kozlova_e_lexic_order/perf_tests/main.cpp @@ -0,0 +1,119 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kozlova_e_lexic_order/include/ops_mpi.hpp" + +TEST(kozlova_e_lexic_order_mpi, test_pipeline_run) { + boost::mpi::communicator world; + const int size = 2000000; + std::vector resMPI; + std::vector expect = {1, 1}; + std::string str1; + std::string str2; + str1.resize(size, '\0'); + 
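+  // Both buffers are filled below with case-mixed but non-decreasing characters, so each string should be reported as ordered.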
str2.resize(size, '\0'); + for (int i = 0; i < 30; i++) { + str1[i] = 'a'; + str2[i] = 'b'; + } + for (int i = 30; i < 60; i++) { + str1[i] = 'C'; + str2[i] = 'd'; + } + for (int i = 60; i < size; i++) { + str1[i] = 'e'; + str2[i] = 'f'; + } + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + resMPI = {0, 0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str1.c_str()))); + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str2.c_str()))); + taskDataPar->inputs_count.emplace_back(static_cast(2)); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(static_cast(2)); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(resMPI, expect); + } +} + +TEST(kozlova_e_lexic_order_mpi, test_task_run) { + boost::mpi::communicator world; + const int size = 2000000; + std::vector resMPI; + std::vector expect = {1, 1}; + std::string str1; + std::string str2; + str1.resize(size, '\0'); + str2.resize(size, '\0'); + for (int i = 0; i < 30; i++) { + str1[i] = 'a'; + str2[i] = 'b'; + } + for (int i = 30; i < 60; i++) { + str1[i] = 'C'; + str2[i] = 'd'; + } + for (int i = 60; i < size; i++) { + str1[i] = 'e'; + str2[i] = 'f'; + } + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + resMPI = {0, 0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str1.c_str()))); + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(str2.c_str()))); + taskDataPar->inputs_count.emplace_back(static_cast(2)); + taskDataPar->outputs.emplace_back(reinterpret_cast(resMPI.data())); + taskDataPar->outputs_count.emplace_back(static_cast(2)); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(resMPI, expect); + } +} diff --git a/tasks/mpi/kozlova_e_lexic_order/src/ops_mpi.cpp b/tasks/mpi/kozlova_e_lexic_order/src/ops_mpi.cpp new file mode 100644 index 00000000000..84c6579506a --- /dev/null +++ b/tasks/mpi/kozlova_e_lexic_order/src/ops_mpi.cpp @@ -0,0 +1,159 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/kozlova_e_lexic_order/include/ops_mpi.hpp" + 
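+// Parallel scheme implemented below: rank 0 broadcasts both strings; every rank
+// checks the ordering of its own substring; each rank except the last also checks
+// the boundary pair between its chunk and the next one; the per-rank 0/1 flags
+// are gathered on rank 0 and combined into the final answer for each string.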
+#include <algorithm>
+#include <string>
+
+std::vector<int> kozlova_e_lexic_order_mpi::LexicographicallyOrdered(const std::string& str1, const std::string& str2) {
+  int flag1 = 1;
+  int flag2 = 1;
+  std::vector<int> localres;
+  std::string lowerStr1 = str1;
+  std::transform(lowerStr1.begin(), lowerStr1.end(), lowerStr1.begin(), ::tolower);
+
+  std::string lowerStr2 = str2;
+  std::transform(lowerStr2.begin(), lowerStr2.end(), lowerStr2.begin(), ::tolower);
+  // i + 1 < size() avoids unsigned underflow (and an out-of-bounds read) when a string is empty.
+  for (size_t i = 0; i + 1 < lowerStr1.size(); ++i) {
+    if (lowerStr1[i] > lowerStr1[i + 1]) {
+      flag1 = 0;
+      break;
+    }
+  }
+
+  for (size_t i = 0; i + 1 < lowerStr2.size(); ++i) {
+    if (lowerStr2[i] > lowerStr2[i + 1]) {
+      flag2 = 0;
+      break;
+    }
+  }
+  localres.emplace_back(flag1);
+  localres.emplace_back(flag2);
+  return localres;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorSeq::pre_processing() {
+  internal_order_test();
+
+  auto* s1 = reinterpret_cast<char*>(taskData->inputs[0]);
+  auto* s2 = reinterpret_cast<char*>(taskData->inputs[1]);
+
+  str1 = std::string(s1);
+  str2 = std::string(s2);
+  return true;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorSeq::validation() {
+  internal_order_test();
+  // Check the number of input strings
+  return taskData->inputs_count[0] == 2;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorSeq::run() {
+  internal_order_test();
+  // An empty string is trivially ordered, and LexicographicallyOrdered handles
+  // empty inputs safely, so it can be called unconditionally for both strings.
+  res = LexicographicallyOrdered(str1, str2);
+  return true;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorSeq::post_processing() {
+  internal_order_test();
+  for (size_t i = 0; i < res.size(); i++) {
+    reinterpret_cast<int*>(taskData->outputs[0])[i] = static_cast<int>(res[i]);
+  }
+  return true;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorMPI::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    auto* s1 = reinterpret_cast<char*>(taskData->inputs[0]);
+    auto* s2 = reinterpret_cast<char*>(taskData->inputs[1]);
+    input_strings.resize(2);
+    input_strings[0] = std::string(s1);
+    input_strings[1] = std::string(s2);
+  } else {
+    input_strings.resize(2);
+  }
+  res.resize(2, 0);
+  return true;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorMPI::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check the number of input strings
+    return taskData->inputs_count[0] == 2;
+  }
+  return true;
+}
+
+bool kozlova_e_lexic_order_mpi::StringComparatorMPI::run() {
+  internal_order_test();
+  for (int i = 0; i < 2; i++) boost::mpi::broadcast(world, input_strings[i], 0);
+  std::vector<int> local_res(2, 1);
+  int len1 = static_cast<int>(input_strings[0].size());
+  int len2 = static_cast<int>(input_strings[1].size());
+  // Each rank checks one contiguous substring of each string.
+  int delta1 = (len1 + world.size() - 1) / world.size();
+  int delta2 = (len2 + world.size() - 1) / world.size();
+  int start1 = world.rank() * delta1;
+  int end1 = std::min(start1 + delta1, len1);
+  int start2 = world.rank() * delta2;
+  int end2 = std::min(start2 + delta2, len2);
+
+  std::string local_string1 = (start1 < len1) ? input_strings[0].substr(start1, end1 - start1) : "";
+  std::string local_string2 = (start2 < len2) ?
input_strings[1].substr(start2, end2 - start2) : ""; + + if (!local_string1.empty()) { + local_res[0] = LexicographicallyOrdered(local_string1, local_string2)[0]; + } + if (!local_string2.empty()) { + local_res[1] = LexicographicallyOrdered(local_string1, local_string2)[1]; + } + + if (world.rank() < world.size() - 1) { + if (end1 > 0 && end1 < len1) { + char last_char1 = std::tolower(input_strings[0][end1 - 1]); + char first_char1_next = std::tolower(input_strings[0][end1]); + if (last_char1 > first_char1_next) { + local_res[0] = 0; + } + } + if (end2 > 0 && end2 < len2) { + char last_char2 = std::tolower(input_strings[1][end2 - 1]); + char first_char2_next = std::tolower(input_strings[1][end2]); + if (last_char2 > first_char2_next) { + local_res[1] = 0; + } + } + } + + std::vector global_results(2 * world.size()); + boost::mpi::gather(world, local_res.data(), local_res.size(), global_results.data(), 0); + + if (world.rank() == 0) { + int is_ordered1 = 1; + int is_ordered2 = 1; + for (int i = 0; i < world.size(); ++i) { + if (global_results[i * 2] == 0) is_ordered1 = 0; + if (global_results[i * 2 + 1] == 0) is_ordered2 = 0; + } + res[0] = is_ordered1; + res[1] = is_ordered2; + } + return true; +} + +bool kozlova_e_lexic_order_mpi::StringComparatorMPI::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (size_t i = 0; i < res.size(); i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/kozlova_e_lexic_order/func_tests/main.cpp b/tasks/seq/kozlova_e_lexic_order/func_tests/main.cpp new file mode 100644 index 00000000000..ae3b9a9aece --- /dev/null +++ b/tasks/seq/kozlova_e_lexic_order/func_tests/main.cpp @@ -0,0 +1,126 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/kozlova_e_lexic_order/include/ops_seq.hpp" + +TEST(kozlova_e_lexic_order, Test_twoStrings) { + // Create data + const char *str1 = "aaabbbccc"; + const char *str2 = "apples"; + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str1))); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str2))); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + kozlova_e_lexic_order::StringComparator testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out[0], 1); + ASSERT_EQ(out[1], 0); +} + +TEST(kozlova_e_lexic_order, Test_EQ_strings) { + // Create data + const char *str1 = "aaabbbccc"; + const char *str2 = "aaabbbccc"; + std::vector out(2, 0); + std::vector expect(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str1))); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str2))); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + kozlova_e_lexic_order::StringComparator testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + 
testTaskSequential.post_processing(); + ASSERT_EQ(out, expect); +} + +TEST(kozlova_e_lexic_order, Test_not_eq_strings) { + // Create data + const char *str1 = "asd"; + const char *str2 = "qwerty"; + std::vector out(2, 0); + std::vector expect(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str1))); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str2))); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + kozlova_e_lexic_order::StringComparator testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, expect); +} + +TEST(kozlova_e_lexic_order, Test_empty_strings) { + // Create data + const char *str1 = " "; + const char *str2 = " "; + std::vector out(2, 0); + std::vector expect(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str1))); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str2))); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + kozlova_e_lexic_order::StringComparator testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, expect); +} + +TEST(kozlova_e_lexic_order, Test_register_strings) { + // Create data + const char *str1 = "aBc"; + const char *str2 = "abC"; + std::vector out(2, 0); + std::vector expect(2, 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str1))); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(str2))); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + kozlova_e_lexic_order::StringComparator testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, expect); +} \ No newline at end of file diff --git a/tasks/seq/kozlova_e_lexic_order/include/ops_seq.hpp b/tasks/seq/kozlova_e_lexic_order/include/ops_seq.hpp new file mode 100644 index 00000000000..58dc514723d --- /dev/null +++ b/tasks/seq/kozlova_e_lexic_order/include/ops_seq.hpp @@ -0,0 +1,26 @@ + +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kozlova_e_lexic_order { + +class StringComparator : public ppc::core::Task { + public: + explicit StringComparator(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string str1{}; + std::string str2{}; + std::vector res{}; + std::vector LexicographicallyOrdered(); +}; + +} // namespace kozlova_e_lexic_order \ No newline at end of file diff --git 
a/tasks/seq/kozlova_e_lexic_order/perf_tests/main.cpp b/tasks/seq/kozlova_e_lexic_order/perf_tests/main.cpp new file mode 100644 index 00000000000..01e4dc89740 --- /dev/null +++ b/tasks/seq/kozlova_e_lexic_order/perf_tests/main.cpp @@ -0,0 +1,118 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kozlova_e_lexic_order/include/ops_seq.hpp" + +TEST(kozlova_e_lexic_order_perf_test, test_pipeline_run) { + char *str1 = new char[1000000 + 1]; + char *str2 = new char[1000000 + 1]; + + for (int i = 0; i < 30; i++) { + str1[i] = 'a'; + str2[i] = 'b'; + } + for (int i = 30; i < 60; i++) { + str1[i] = 'C'; + str2[i] = 'd'; + } + for (int i = 60; i < 1000000; i++) { + str1[i] = 'e'; + str2[i] = 'f'; + } + str1[1000000] = '\0'; + str2[1000000] = '\0'; + + std::vector out(2, 0); + std::vector expect(2, 1); + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str2)); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expect, out); + delete[] str1; + delete[] str2; +} + +TEST(kozlova_e_lexic_order_perf_test, test_task_run) { + char *str1 = new char[1000000 + 1]; + char *str2 = new char[1000000 + 1]; + + for (int i = 0; i < 30; i++) { + str1[i] = 'a'; + str2[i] = 'b'; + } + for (int i = 30; i < 60; i++) { + str1[i] = 'C'; + str2[i] = 'd'; + } + for (int i = 60; i < 1000000; i++) { + str1[i] = 'e'; + str2[i] = 'f'; + } + str1[1000000] = '\0'; + str2[1000000] = '\0'; + + std::vector out(2, 0); + std::vector expect(2, 1); + + // Create TaskData + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str1)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str2)); + taskDataSeq->inputs_count.emplace_back(static_cast(2)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(static_cast(2)); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, 
perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(expect, out);
+  delete[] str1;
+  delete[] str2;
+}
\ No newline at end of file
diff --git a/tasks/seq/kozlova_e_lexic_order/src/ops_seq.cpp b/tasks/seq/kozlova_e_lexic_order/src/ops_seq.cpp
new file mode 100644
index 00000000000..2af8fb979fa
--- /dev/null
+++ b/tasks/seq/kozlova_e_lexic_order/src/ops_seq.cpp
@@ -0,0 +1,61 @@
+
+#include "seq/kozlova_e_lexic_order/include/ops_seq.hpp"
+
+#include <algorithm>
+
+bool kozlova_e_lexic_order::StringComparator::pre_processing() {
+  internal_order_test();
+
+  auto* s1 = reinterpret_cast<char*>(taskData->inputs[0]);
+  auto* s2 = reinterpret_cast<char*>(taskData->inputs[1]);
+
+  str1 = std::string(s1);
+  str2 = std::string(s2);
+  return true;
+}
+
+bool kozlova_e_lexic_order::StringComparator::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 2;
+}
+
+std::vector<int> kozlova_e_lexic_order::StringComparator::LexicographicallyOrdered() {
+  int flag1 = 1;
+  int flag2 = 1;
+  std::vector<int> localres;
+  std::string lowerStr1 = str1;
+  std::transform(lowerStr1.begin(), lowerStr1.end(), lowerStr1.begin(), ::tolower);
+
+  std::string lowerStr2 = str2;
+  std::transform(lowerStr2.begin(), lowerStr2.end(), lowerStr2.begin(), ::tolower);
+  // i + 1 < size() avoids unsigned underflow (and an out-of-bounds read) when a string is empty.
+  for (size_t i = 0; i + 1 < lowerStr1.size(); ++i) {
+    if (lowerStr1[i] > lowerStr1[i + 1]) {
+      flag1 = 0;
+      break;
+    }
+  }
+
+  for (size_t i = 0; i + 1 < lowerStr2.size(); ++i) {
+    if (lowerStr2[i] > lowerStr2[i + 1]) {
+      flag2 = 0;
+      break;
+    }
+  }
+  localres.emplace_back(flag1);
+  localres.emplace_back(flag2);
+  return localres;
+}
+
+bool kozlova_e_lexic_order::StringComparator::run() {
+  internal_order_test();
+  res = LexicographicallyOrdered();
+  return true;
+}
+
+bool kozlova_e_lexic_order::StringComparator::post_processing() {
+  internal_order_test();
+  for (size_t i = 0; i < res.size(); i++) {
+    reinterpret_cast<int*>(taskData->outputs[0])[i] = static_cast<int>(res[i]);
+  }
+  return true;
+}
From 4828db93474a1d326ed7619b255c087c271fb004 Mon Sep 17 00:00:00 2001
From: Nikita Koshkin <113104527+NikitaKoshkin@users.noreply.github.com>
Date: Tue, 5 Nov 2024 03:27:21 +0300
Subject: [PATCH 105/155] Koshkin Nikita. Task 1. Variant 12. Sum of matrix
 values by columns. (#173)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task: a two-dimensional vector is created to
store the matrix elements. The program computes the sum of each matrix column
and stores the results in a separate vector.

Description of the MPI task: the rows are distributed across processes, and
each process computes the column sums for its own set of rows. The final
column sums are collected on the root process. Step by step:

1) In pre_processing(), the source matrix is stored in a flat one-dimensional
vector input_ (of size rows * columns);
2) 2.1) In run(), broadcast is used to share the values of rows and columns;
   2.2) The root process (rank 0) divides the matrix rows between the
processes and sends each of them a segment of data (several matrix rows) via
send();
   2.3) The remaining processes receive their rows via recv() and store them
in local_input_;
3) Each process sums the columns of its own piece of the matrix (in
local_input_) and stores the results in local_res;
4) At the end of run(), reduce() sums the locally computed results (local_res)
from all processes and stores the final sum of each column in the vector res
on the root process.
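As a rough standalone illustration (not code from this patch), steps 1)-4)
boil down to the following boost::mpi sketch; the matrix contents, sizes, and
message tag are arbitrary, and it assumes the number of processes does not
exceed the number of rows:

#include <algorithm>
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <functional>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int rows = 8;
  const int columns = 4;
  std::vector<int> matrix;  // flat rows*columns matrix, filled on rank 0 only
  if (world.rank() == 0) {
    matrix.resize(rows * columns);
    for (int i = 0; i < rows * columns; ++i) matrix[i] = i;
  }

  // Step 2: block-distribute the rows (the first `extra` ranks get one row more).
  int rows_per_proc = rows / world.size();
  int extra = rows % world.size();
  int local_rows = rows_per_proc + (world.rank() < extra ? 1 : 0);
  std::vector<int> local(local_rows * columns);

  if (world.rank() == 0) {
    int offset = local_rows * columns;
    std::copy(matrix.begin(), matrix.begin() + offset, local.begin());  // rank 0 keeps the first block
    for (int proc = 1; proc < world.size(); ++proc) {
      int proc_rows = rows_per_proc + (proc < extra ? 1 : 0);
      world.send(proc, 0, matrix.data() + offset, proc_rows * columns);
      offset += proc_rows * columns;
    }
  } else {
    world.recv(0, 0, local.data(), local_rows * columns);
  }

  // Step 3: per-column sums over this rank's row block.
  std::vector<int> local_sum(columns, 0);
  for (int i = 0; i < local_rows; ++i)
    for (int j = 0; j < columns; ++j) local_sum[j] += local[i * columns + j];

  // Step 4: element-wise reduction of the partial sums onto rank 0.
  std::vector<int> res(columns, 0);
  boost::mpi::reduce(world, local_sum.data(), columns, res.data(), std::plus<int>(), 0);

  if (world.rank() == 0) {
    for (int j = 0; j < columns; ++j) std::cout << "column " << j << " sum = " << res[j] << '\n';
  }
  return 0;
}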
---
 .../func_tests/main.cpp | 286 ++++++++++++++++++
 .../include/ops_mpi.hpp |  46 +++
 .../perf_tests/main.cpp | 102 +++++++
 .../src/ops_mpi.cpp     | 150 +++++++++
 .../func_tests/main.cpp | 170 +++++++++++
 .../include/ops_seq.hpp |  25 ++
 .../perf_tests/main.cpp |  92 ++++++
 .../src/ops_seq.cpp     |  54 ++++
 8 files changed, 925 insertions(+)
 create mode 100644 tasks/mpi/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/koshkin_n_sum_values_by_columns_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/koshkin_n_sum_values_by_columns_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..c79895764a3
--- /dev/null
+++ b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp
@@ -0,0 +1,286 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp"
+
+std::vector<int> getRndVect(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_SquareMatrixSmall) {
+  boost::mpi::communicator world;
+
+  int rows = 10;
+  int columns = 10;
+
+  std::vector<int> matrix = getRndVect(columns * rows);
+  std::vector<int> res_out_paral(columns, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->inputs_count.emplace_back(columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+
+  koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> res_out_seq(columns, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_out_seq.size()); + + // Create Task + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_out_paral, res_out_seq); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_EmptyMatrix) { + boost::mpi::communicator world; + + const int rows = 0; + const int columns = 0; + + std::vector matrix = {}; + std::vector res_out_paral(columns, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_SquareMatrixMedium) { + boost::mpi::communicator world; + + const int rows = 100; + const int columns = 100; + + std::vector matrix = getRndVect(columns * rows); + std::vector res_out_paral(columns, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_out_seq(columns, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_out_seq.size()); + + // Create Task + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_out_paral, res_out_seq); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_SquareMatrixLarge) { + boost::mpi::communicator world; + + const int rows = 1000; + const int columns = 1000; + + std::vector matrix = getRndVect(columns * rows); + std::vector res_out_paral(columns, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if 
(world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_out_seq(columns, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_out_seq.size()); + + // Create Task + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_out_paral, res_out_seq); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_MatrixSmall15x10) { + boost::mpi::communicator world; + + const int rows = 15; + const int columns = 10; + + std::vector matrix = getRndVect(columns * rows); + std::vector res_out_paral(columns, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_out_seq(columns, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_out_seq.size()); + + // Create Task + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_out_paral, res_out_seq); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_MPI, Test_Matrix500x1000) { + boost::mpi::communicator world; + + const int rows = 500; + const int columns = 1000; + + std::vector matrix = getRndVect(columns * rows); + std::vector res_out_paral(columns, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + 
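+    // Only rank 0 owns and fills the TaskData buffers; the other ranks
+    // participate in the parallel task with an empty TaskData.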
taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_out_seq(columns, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out_seq.data())); + taskDataSeq->outputs_count.emplace_back(res_out_seq.size()); + + // Create Task + koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_out_paral, res_out_seq); + } +} \ No newline at end of file diff --git a/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..ba663a6c4ba --- /dev/null +++ b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace koshkin_n_sum_values_by_columns_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; + int rows; + int columns; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector res, input_, local_input_; + int rows; + int columns; + boost::mpi::communicator world; +}; +} // namespace koshkin_n_sum_values_by_columns_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..2e6a31c72db --- /dev/null +++ b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,102 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp" + +TEST(koshkin_n_sum_values_by_columns_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + + int rows = 300; + int columns = 300; + + std::vector matrix(columns * rows, 0); + std::vector res_out_paral(columns, 0); + std::vector exp_res_paral(columns, 0); + matrix[5] = 1; + exp_res_paral[5] = 1; + + 
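+  // A single nonzero element at flat index 5 (row 0, column 5) makes every
+  // expected column sum 0 except column 5, which must be 1.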
// Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res_out_paral, exp_res_paral); + } +} + +TEST(koshkin_n_sum_values_by_columns_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + int rows = 300; + int columns = 300; + + std::vector matrix(columns * rows, 0); + std::vector res_out_paral(columns, 0); + std::vector exp_res_paral(columns, 0); + matrix[5] = 1; + exp_res_paral[5] = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_out_paral.data())); + taskDataPar->outputs_count.emplace_back(res_out_paral.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res_out_paral, exp_res_paral); + } +} \ No newline at end of file diff --git a/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/src/ops_mpi.cpp b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..8e5f7b98d3f --- /dev/null +++ b/tasks/mpi/koshkin_n_sum_values_by_columns_matrix/src/ops_mpi.cpp @@ -0,0 +1,150 @@ +#include "mpi/koshkin_n_sum_values_by_columns_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + rows = taskData->inputs_count[0]; + columns = taskData->inputs_count[1]; + + // TaskData + input_.resize(rows, std::vector(columns)); + + int* inputMatrix = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < rows; 
++i) {
+    for (int j = 0; j < columns; ++j) {
+      input_[i][j] = inputMatrix[i * columns + j];
+    }
+  }
+
+  res.resize(columns, 0);  // sumColumns
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // Check that inputs/outputs are present, the matrix is non-empty,
+  // and the output has one slot per column
+  return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+          (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+          taskData->inputs_count[1] == taskData->outputs_count[0]);
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+
+  for (int j = 0; j < columns; ++j) {
+    res[j] = 0;
+    for (int i = 0; i < rows; ++i) {
+      res[j] += input_[i][j];
+    }
+  }
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  int* outputSums = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (int j = 0; j < columns; ++j) {
+    outputSums[j] = res[j];
+  }
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[0];
+    columns = taskData->inputs_count[1];
+    input_.resize(rows * columns);
+    int* inputMatrix = reinterpret_cast<int*>(taskData->inputs[0]);
+    for (int i = 0; i < rows; ++i) {
+      for (int j = 0; j < columns; ++j) {
+        input_[i * columns + j] = inputMatrix[i * columns + j];
+      }
+    }
+    // res is sized here only on rank 0: on the other ranks `columns` is not
+    // known yet (it is broadcast in run(), which resizes res again).
+    res.resize(columns, 0);
+  }
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check that inputs/outputs are present, the matrix is non-empty,
+    // and the output has one slot per column
+    return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+            (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+            taskData->inputs_count[1] == taskData->outputs_count[0]);
+  }
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[0];
+    columns = taskData->inputs_count[1];
+  }
+
+  broadcast(world, rows, 0);
+  broadcast(world, columns, 0);
+
+  int rows_per_process;
+  int extra_rows;
+
+  if (world.rank() == 0) {
+    rows_per_process = rows / world.size();
+    extra_rows = rows % world.size();
+  }
+
+  broadcast(world, rows_per_process, 0);
+  broadcast(world, extra_rows, 0);
+
+  int local_rows = rows_per_process + (world.rank() < extra_rows ? 1 : 0);
+
+  local_input_.resize(local_rows * columns);
+
+  if (world.rank() == 0) {
+    int offset = local_rows * columns;
+    for (int proc = 1; proc < world.size(); ++proc) {
+      int proc_rows = rows_per_process + (proc < extra_rows ?
1 : 0); + world.send(proc, 2, input_.data() + offset, proc_rows * columns); + offset += proc_rows * columns; + } + std::copy(input_.begin(), input_.begin() + local_rows * columns, local_input_.begin()); + } else { + world.recv(0, 2, local_input_.data(), local_rows * columns); + } + + std::vector local_sum(columns, 0); + + for (int i = 0; i < local_rows; ++i) { + for (int j = 0; j < columns; ++j) { + local_sum[j] += local_input_[i * columns + j]; + } + } + + res.resize(columns, 0); + boost::mpi::reduce(world, local_sum.data(), columns, res.data(), std::plus<>(), 0); + + return true; +} + +bool koshkin_n_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + int* outputSums = reinterpret_cast(taskData->outputs[0]); + for (int j = 0; j < columns; ++j) { + outputSums[j] = res[j]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..7d3da4ef130 --- /dev/null +++ b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/func_tests/main.cpp @@ -0,0 +1,170 @@ +#include + +#include + +#include "seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp" + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_invalid_matrix_validation_columns) { + int rows = 5; + int columns = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix = {1, 2, 3, 4, 5}; + std::vector res_out = {0, 0}; + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + ASSERT_NE(testTaskSequential.validation(), true); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_invalid_matrix_validation_rows) { + int rows = 0; + int columns = 15; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix = {2}; + std::vector res_out = {0, 0}; + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + // ASSERT_NE(testTaskSequential.validation(), true); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_sum_values_by_columns_SquareMatrixSmall) { + int rows = 2; + int columns = 2; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix = {1, 2, 3, 4}; + std::vector res_out = {0, 0}; // Sum column + std::vector exp_res = {4, 6}; + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + // After post processing it should look like this + // input_ = { + // {1, 2}, + // {3, 4} + // }; + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + // res[0] 
(sum of the first column) = 1 + 3 = 4 + // res[1] (sum of the second column) = 2 + 4 = 6 + + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + ASSERT_EQ(res_out, exp_res); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_sum_values_by_columns_SquareMatrixLarge) { + const int rows = 1000; + const int columns = 1000; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix(rows * columns); + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < columns; ++j) { + matrix[i * columns + j] = i + j; + } + } + std::vector res_out(columns, 0); + std::vector exp_res(columns, 0); + for (int j = 0; j < columns; ++j) { + for (int i = 0; i < rows; ++i) { + exp_res[j] += (i + j); + } + } + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + ASSERT_EQ(res_out, exp_res); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_sum_values_by_columns_MatrixSmall4x10) { + const int rows = 4; + const int columns = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 1 row + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, // 2 + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, // 3 + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 // 4 + }; + std::vector res_out(columns, 0); + std::vector exp_res(columns, 0); + for (int j = 0; j < columns; ++j) { + exp_res[j] = matrix[j] + matrix[j + columns] + matrix[j + 2 * columns] + matrix[j + 3 * columns]; + } + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + ASSERT_EQ(res_out, exp_res); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, Test_sum_values_by_columns_MatrixLarge400x600) { + const int rows = 400; + const int columns = 600; + + std::shared_ptr taskDataSeq = std::make_shared(); + koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector matrix(columns * rows, 0); + std::vector res_out(columns, 0); + std::vector exp_res(columns, 0); + matrix[15] = 15; + exp_res[15] = 15; + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + 
taskDataSeq->outputs_count.emplace_back(res_out.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + ASSERT_EQ(res_out, exp_res); +} diff --git a/tasks/seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..6eee6ac1087 --- /dev/null +++ b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace koshkin_n_sum_values_by_columns_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; + int rows; + int columns; +}; +} // namespace koshkin_n_sum_values_by_columns_matrix_seq \ No newline at end of file diff --git a/tasks/seq/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..e87bebc9b51 --- /dev/null +++ b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,92 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp" + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, test_pipeline_run) { + int rows = 3000; + int columns = 3000; + + std::vector matrix(columns * rows, 0); + std::vector res_out(columns, 0); + std::vector exp_res(columns, 0); + for (int i = 0; i < 1000; i += 2) { + matrix[i] = 1; + exp_res[i] = 1; + } + + std::shared_ptr taskDataSeq = std::make_shared(); + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_out.data())); + taskDataSeq->outputs_count.emplace_back(res_out.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(res_out, exp_res); +} + +TEST(koshkin_n_sum_values_by_columns_matrix_seq, test_task_run) { + int rows = 3000; + int columns = 3000; + + std::vector matrix(columns * rows, 0); + std::vector res_out(columns, 0); + std::vector exp_res(columns, 0); + for (int i = 0; i < 1000; i += 2) { + matrix[i] = 1; + exp_res[i] = 1; + } + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = + std::make_shared(taskDataSeq); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); 
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->inputs_count.emplace_back(columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(res_out, exp_res);
+}
\ No newline at end of file
diff --git a/tasks/seq/koshkin_n_sum_values_by_columns_matrix/src/ops_seq.cpp b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/src/ops_seq.cpp
new file mode 100644
index 00000000000..51e5807c229
--- /dev/null
+++ b/tasks/seq/koshkin_n_sum_values_by_columns_matrix/src/ops_seq.cpp
@@ -0,0 +1,54 @@
+#include "seq/koshkin_n_sum_values_by_columns_matrix/include/ops_seq.hpp"
+
+#include <vector>
+
+bool koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+
+  rows = taskData->inputs_count[0];
+  columns = taskData->inputs_count[1];
+
+  // TaskData
+  input_.resize(rows, std::vector<int>(columns));
+
+  int* inputMatrix = reinterpret_cast<int*>(taskData->inputs[0]);
+  for (int i = 0; i < rows; ++i) {
+    for (int j = 0; j < columns; ++j) {
+      input_[i][j] = inputMatrix[i * columns + j];
+    }
+  }
+
+  res.resize(columns, 0);  // sumColumns
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential::validation() {
+  internal_order_test();
+
+  return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+          (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+          taskData->inputs_count[1] == taskData->outputs_count[0]);
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential::run() {
+  internal_order_test();
+
+  for (int j = 0; j < columns; ++j) {
+    res[j] = 0;
+    for (int i = 0; i < rows; ++i) {
+      res[j] += input_[i][j];
+    }
+  }
+  return true;
+}
+
+bool koshkin_n_sum_values_by_columns_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  int* outputSums = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (int j = 0; j < columns; ++j) {
+    outputSums[j] = res[j];
+  }
+  return true;
+}
\ No newline at end of file
From dc14bf0e87a54ce29bbdb84601060e53cc246451 Mon Sep 17 00:00:00 2001
From: Mikhail Burykin <86657075+WholeHorse@users.noreply.github.com>
Date: Tue, 5 Nov 2024 03:28:04 +0300
Subject: [PATCH 106/155] Burykin Mikhail. Task 1. Variant 24. Counting the
 number of words in a string. (#192)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

#### Description of the sequential algorithm:
- If input data is present, it is read from taskData->inputs[0] and converted
into the string input_. If no data is present, input_ is set to an empty
string.
- Word counting is performed by calling count_words(), which takes input_ and
returns the number of words. The result is stored in word_count_.
- The method is_word_character() determines whether a character is part of a
word; it returns true if the character is a letter or an apostrophe.
- When a character starts a new word, the counter count is incremented and the
in_word flag is set to true. When a character that is not part of a word is
found, the in_word flag is reset.

#### Description of the MPI algorithm:
- In pre_processing():
  - Each process initializes the variable length, which stores the length of
the input data.
  - The process with rank 0 (the root process) reads the input string from
taskData->inputs[0] and stores it in the vector input_ so that parts of the
string can later be distributed among the processes.
  - The counter word_count_ is initialized to hold the total number of words.
- In run():
  - If the string length is zero, the process finishes without further work.
  - If only one process participates in the MPI communicator, that process
performs the whole word count by itself, simply counting the word separators.
  - With several processes, the root process splits the string into parts for
parallel processing:
    - The part size partSize handled by each process is computed; the last
process may receive a shorter part so that the whole string is covered.
    - The root process sends each process its part of the string; every
process, including the last one, receives exactly one fragment to work on.
  - The root process collects the results from all processes:
    - Each process counts the word separators in its part of the string and
sends the result back to the root process.
    - The root process receives the results from all processes, sums them, and
adds 1 to the total word count (since separators delimit words).
  - Each process takes its part of the string (the vector chunk) and walks
over every character, using is_word_character() to check whether the character
is part of a word.
  - The process increments its counter for every word separator it finds.
- In post_processing():
  - The root process writes the final word count into taskData->outputs[0] for
further use or output.
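As a minimal self-contained sketch of the word/separator transition rule
described above (reconstructed from this description, not copied from the
patch; the MPI plumbing is omitted):

#include <cctype>
#include <iostream>
#include <string>

// A word character is a letter or an apostrophe (mirrors is_word_character()).
bool is_word_character(char c) {
  return (std::isalpha(static_cast<unsigned char>(c)) != 0) || c == '\'';
}

// Counts words by detecting transitions from separator to word character
// (the counting rule that count_words() is described as using).
int count_words(const std::string& text) {
  int count = 0;
  bool in_word = false;
  for (char c : text) {
    if (is_word_character(c)) {
      if (!in_word) {
        ++count;  // a new word starts at this character
        in_word = true;
      }
    } else {
      in_word = false;  // a separator ends the current word
    }
  }
  return count;
}

int main() {
  std::cout << count_words("it's a small world") << '\n';  // prints 4
  return 0;
}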
--------- Co-authored-by: ovVrLFg8ks <122876910+ovVrLFg8ks@users.noreply.github.com> --- .../burykin_m_word_count/func_tests/main.cpp | 263 ++++++++++++++++++ .../burykin_m_word_count/include/ops_mpi.hpp | 56 ++++ .../burykin_m_word_count/perf_tests/main.cpp | 114 ++++++++ .../mpi/burykin_m_word_count/src/ops_mpi.cpp | 144 ++++++++++ .../burykin_m_word_count/func_tests/main.cpp | 101 +++++++ .../burykin_m_word_count/include/ops_seq.hpp | 27 ++ .../burykin_m_word_count/perf_tests/main.cpp | 148 ++++++++++ .../seq/burykin_m_word_count/src/ops_seq.cpp | 66 +++++ 8 files changed, 919 insertions(+) create mode 100644 tasks/mpi/burykin_m_word_count/func_tests/main.cpp create mode 100644 tasks/mpi/burykin_m_word_count/include/ops_mpi.hpp create mode 100644 tasks/mpi/burykin_m_word_count/perf_tests/main.cpp create mode 100644 tasks/mpi/burykin_m_word_count/src/ops_mpi.cpp create mode 100644 tasks/seq/burykin_m_word_count/func_tests/main.cpp create mode 100644 tasks/seq/burykin_m_word_count/include/ops_seq.hpp create mode 100644 tasks/seq/burykin_m_word_count/perf_tests/main.cpp create mode 100644 tasks/seq/burykin_m_word_count/src/ops_seq.cpp diff --git a/tasks/mpi/burykin_m_word_count/func_tests/main.cpp b/tasks/mpi/burykin_m_word_count/func_tests/main.cpp new file mode 100644 index 00000000000..63abb8a07cd --- /dev/null +++ b/tasks/mpi/burykin_m_word_count/func_tests/main.cpp @@ -0,0 +1,263 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/burykin_m_word_count/include/ops_mpi.hpp" + +std::vector burykin_m_word_count::RandomSentence(int size) { + std::vector vec(size); + std::random_device dev; + std::mt19937 gen(dev()); + if (size > 0) { + vec[size - 1] = 0x61 + gen() % 26; + vec[0] = 0x41 + gen() % 26; + } + for (int i = 1; i < size - 1; i++) { + if (vec[i - 1] != ' ' && gen() % 4 == 0) { + vec[i] = ' '; + } else { + vec[i] = 0x61 + gen() % 26; + } + } + return vec; +} + +TEST(burykin_m_word_count_MPI_func, TestEmptyString) { + int length = 0; + + // Create data + std::vector input(length); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + burykin_m_word_count::TestTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + burykin_m_word_count::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], local_count[0]); + } +} + +TEST(burykin_m_word_count_MPI_func, TestStringABC) { + std::string input_str = "abc"; + std::vector 
input(input_str.begin(), input_str.end()); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + burykin_m_word_count::TestTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + burykin_m_word_count::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], local_count[0]); + } +} + +TEST(burykin_m_word_count_MPI_func, TestLength30) { + int length = 30; + + // Create data + std::vector input(length); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + input = burykin_m_word_count::RandomSentence(length); + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + burykin_m_word_count::TestTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + burykin_m_word_count::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], local_count[0]); + } +} + +TEST(burykin_m_word_count_MPI_func, TestLength50) { + int length = 50; + + // Create data + std::vector input(length); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + input = burykin_m_word_count::RandomSentence(length); + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + 
taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + burykin_m_word_count::TestTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + burykin_m_word_count::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], local_count[0]); + } +} + +TEST(burykin_m_word_count_MPI_func, TestLength99) { + int length = 99; + + // Create data + std::vector input(length); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + input = burykin_m_word_count::RandomSentence(length); + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + burykin_m_word_count::TestTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + burykin_m_word_count::TestTaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(wordCount[0], local_count[0]); + } +} diff --git a/tasks/mpi/burykin_m_word_count/include/ops_mpi.hpp b/tasks/mpi/burykin_m_word_count/include/ops_mpi.hpp new file mode 100644 index 00000000000..be1a08af4c2 --- /dev/null +++ b/tasks/mpi/burykin_m_word_count/include/ops_mpi.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace burykin_m_word_count { + +std::vector RandomSentence(int size); + +// Последовательная версия +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + int word_count_{}; + + static bool is_word_character(char c); + static int 
count_words(const std::string& text); +}; + +// Параллельная версия +class TestTaskParallel : public ppc::core::Task { + public: + explicit TestTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + boost::mpi::communicator world; + std::vector input_; + std::vector local_input_; + int word_count_{}; + int local_word_count_{}; + int length{}; + + static bool is_word_character(char c); + int count_words(const std::vector& text); +}; + +} // namespace burykin_m_word_count diff --git a/tasks/mpi/burykin_m_word_count/perf_tests/main.cpp b/tasks/mpi/burykin_m_word_count/perf_tests/main.cpp new file mode 100644 index 00000000000..cd3d0e1f5cd --- /dev/null +++ b/tasks/mpi/burykin_m_word_count/perf_tests/main.cpp @@ -0,0 +1,114 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/burykin_m_word_count/include/ops_mpi.hpp" + +std::vector burykin_m_word_count::RandomSentence(int size) { + std::vector vec(size); + std::random_device dev; + std::mt19937 gen(dev()); + if (size > 0) { + vec[size - 1] = 0x61 + gen() % 26; + vec[0] = 0x41 + gen() % 26; + } + for (int i = 1; i < size - 1; i++) { + if (vec[i - 1] != ' ' && gen() % 4 == 0) { + vec[i] = ' '; + } else { + vec[i] = 0x61 + gen() % 26; + } + } + return vec; +} + +TEST(burykin_m_word_count_MPI_perf, test_pipeline_run) { + int length = 10000; + + // Create data + std::vector input(length, 'a'); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, wordCount[0]); + } +} + +TEST(burykin_m_word_count_MPI_perf, test_task_run) { + int length = 10000; + + // Create data + std::vector input(length, 'a'); + std::vector wordCount(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(input.size()); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordCount.data())); + taskDataPar->outputs_count.emplace_back(wordCount.size()); + } + + // Create Task + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + 
testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, wordCount[0]); + } +} diff --git a/tasks/mpi/burykin_m_word_count/src/ops_mpi.cpp b/tasks/mpi/burykin_m_word_count/src/ops_mpi.cpp new file mode 100644 index 00000000000..146d72efe14 --- /dev/null +++ b/tasks/mpi/burykin_m_word_count/src/ops_mpi.cpp @@ -0,0 +1,144 @@ +#include "mpi/burykin_m_word_count/include/ops_mpi.hpp" + +using namespace std::chrono_literals; + +namespace burykin_m_word_count { + +bool TestTaskSequential::pre_processing() { + internal_order_test(); + if (taskData->inputs[0] != nullptr && taskData->inputs_count[0] > 0) { + input_ = std::string(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + } else { + input_ = ""; + } + word_count_ = 0; + return true; +} + +bool TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1; +} + +bool TestTaskSequential::run() { + internal_order_test(); + word_count_ = count_words(input_); + return true; +} + +bool TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = word_count_; + return true; +} +bool TestTaskSequential::is_word_character(char c) { return std::isalpha(static_cast(c)) != 0; } + +int TestTaskSequential::count_words(const std::string &text) { + int count = 0; + bool in_word = false; + + for (char c : text) { + if (is_word_character(c)) { + if (!in_word) { + count++; + in_word = true; + } + } else { + in_word = false; + } + } + + return count; +} + +bool TestTaskParallel::pre_processing() { + internal_order_test(); + + // Init vectors + length = taskData->inputs_count[0]; + + if (world.rank() == 0) { + input_ = std::vector(length); + char *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < length; i++) { + input_[i] = tmp_ptr[i]; + } + // Init values for output + word_count_ = 0; + } + + return true; +} + +bool TestTaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool TestTaskParallel::run() { + internal_order_test(); + + if (length == 0) { + return true; + } + +#if defined(_MSC_VER) && !defined(__clang__) + if (world.size() == 1) { + for (int i = 0; i < length; i++) { + if (!is_word_character(input_[i])) word_count_++; + } + word_count_++; + return true; + } +#endif + + int world_size = world.size(); + + if (world.rank() > length) { + return true; + } + if (world_size > length + 1) world_size = length + 1; + + int partSize = length / (world_size - 1); + int endPartSize = length - partSize * (world_size - 2); + + if (world.rank() == 0) { + for (int i = 0; i < world_size - 2; i++) { + world.send(i + 1, 0, input_.data() + i * partSize, partSize); + } + world.send(world_size - 1, 0, input_.data() + (world_size - 2) * partSize, endPartSize); + + int counter = 0; + for (int i = 0; i < world_size - 1; i++) { + world.recv(i + 1, 1, &counter, 1); + word_count_ += counter; + } + 
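+    // summed counters give the number of separators; one more increment yields the word count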
word_count_++; + } else { + int localPart = partSize; + if (world_size - 1 == world.rank()) localPart = endPartSize; + std::vector chunk(localPart); + int counter = 0; + world.recv(0, 0, chunk.data(), localPart); + + for (char ch : chunk) + if (!is_word_character(ch)) counter++; + world.send(0, 1, &counter, 1); + } + return true; +} + +bool TestTaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = word_count_; + } + return true; +} + +bool TestTaskParallel::is_word_character(char c) { return std::isalpha(static_cast(c)) != 0; } + +} // namespace burykin_m_word_count diff --git a/tasks/seq/burykin_m_word_count/func_tests/main.cpp b/tasks/seq/burykin_m_word_count/func_tests/main.cpp new file mode 100644 index 00000000000..43d47d9e355 --- /dev/null +++ b/tasks/seq/burykin_m_word_count/func_tests/main.cpp @@ -0,0 +1,101 @@ +#include + +#include "seq/burykin_m_word_count/include/ops_seq.hpp" + +TEST(WordCountSequential, TestIsWordCharacter) { + EXPECT_TRUE(burykin_m_word_count::TestTaskSequential::is_word_character('a')); + EXPECT_TRUE(burykin_m_word_count::TestTaskSequential::is_word_character('Z')); + + EXPECT_TRUE(burykin_m_word_count::TestTaskSequential::is_word_character('\'')); + + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('@')); + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('#')); + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('!')); + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('$')); + + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character(' ')); + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('\n')); + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('\t')); + + EXPECT_FALSE(burykin_m_word_count::TestTaskSequential::is_word_character('\0')); +} + +TEST(WordCountSequential, EmptyString) { + std::string input; + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(in.data()); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + burykin_m_word_count::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(0, out[0]); +} + +TEST(WordCountSequential, SingleWord) { + std::string input = "Hello."; + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(in.data()); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + burykin_m_word_count::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(1, out[0]); +} + +TEST(WordCountSequential, MultipleWords) { + std::string input = "This is a test sentence."; + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(in.data()); + taskData->inputs_count.emplace_back(in.size()); + 
taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + burykin_m_word_count::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(5, out[0]); +} + +TEST(WordCountSequential, WordsWithApostrophes) { + std::string input = "It's a beautiful day, isn't it?"; + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(in.data()); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + burykin_m_word_count::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(6, out[0]); +} diff --git a/tasks/seq/burykin_m_word_count/include/ops_seq.hpp b/tasks/seq/burykin_m_word_count/include/ops_seq.hpp new file mode 100644 index 00000000000..01ee4faf48c --- /dev/null +++ b/tasks/seq/burykin_m_word_count/include/ops_seq.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace burykin_m_word_count { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static bool is_word_character(char c); + + private: + std::string input_; + int word_count_{}; + static int count_words(const std::string& text); +}; + +} // namespace burykin_m_word_count diff --git a/tasks/seq/burykin_m_word_count/perf_tests/main.cpp b/tasks/seq/burykin_m_word_count/perf_tests/main.cpp new file mode 100644 index 00000000000..921468f5114 --- /dev/null +++ b/tasks/seq/burykin_m_word_count/perf_tests/main.cpp @@ -0,0 +1,148 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/burykin_m_word_count/include/ops_seq.hpp" + +using namespace ppc::core; +using namespace burykin_m_word_count; + +TEST(WordCountSequential, TestSingleWord) { + std::string input = "Hello."; + int expected_count = 1; + + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output_data[0], expected_count); +} + +TEST(WordCountSequential, TestMultipleWords) { + std::string input = "Hello world baba gaga."; + int expected_count = 4; + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + TestTaskSequential 
task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output_data[0], expected_count); +} + +TEST(WordCountSequential, TestApostrophes) { + std::string input = "Feels like i'm walking on sunshine."; + int expected_count = 6; + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output_data[0], expected_count); +} + +TEST(WordCountSequential, TestNoWords) { + std::string input = "!!! ??? ..."; + int expected_count = 0; + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output_data[0], expected_count); +} + +TEST(WordCountSequential, PipelineRunPerformance) { + std::string input = "This is a sample text to test the word counting functionality."; + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + auto task = std::make_shared(taskData); + Perf perfAnalyzer(task); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + perfAttr->current_timer = []() -> double { + return static_cast(std::chrono::steady_clock::now().time_since_epoch().count()) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + perfAnalyzer.pipeline_run(perfAttr, perfResults); + Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(output_data[0], 11); +} + +TEST(WordCountSequential, TaskRunPerformance) { + std::string input = "Another example sentence to evaluate the performance of the word counting task."; + std::vector input_data(input.begin(), input.end()); + std::vector output_data(1, 0); + + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input_data.data())); + taskData->inputs_count.emplace_back(input_data.size()); + taskData->outputs.emplace_back(reinterpret_cast(output_data.data())); + taskData->outputs_count.emplace_back(output_data.size()); + + auto task = std::make_shared(taskData); + Perf perfAnalyzer(task); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + perfAttr->current_timer = []() -> double { + return static_cast(std::chrono::steady_clock::now().time_since_epoch().count()) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + 
perfAnalyzer.task_run(perfAttr, perfResults);
+  Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(output_data[0], 12);
+}
diff --git a/tasks/seq/burykin_m_word_count/src/ops_seq.cpp b/tasks/seq/burykin_m_word_count/src/ops_seq.cpp
new file mode 100644
index 00000000000..072974ae729
--- /dev/null
+++ b/tasks/seq/burykin_m_word_count/src/ops_seq.cpp
@@ -0,0 +1,66 @@
+#include "seq/burykin_m_word_count/include/ops_seq.hpp"
+
+namespace burykin_m_word_count {
+
+bool TestTaskSequential::pre_processing() {
+  internal_order_test();
+  if (taskData->inputs[0] != nullptr && taskData->inputs_count[0] > 0) {
+    input_ = std::string(reinterpret_cast<char*>(taskData->inputs[0]), taskData->inputs_count[0]);
+  } else {
+    input_ = "";
+  }
+  word_count_ = 0;
+  return true;
+}
+
+bool TestTaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs_count[0] == 0 || taskData->inputs_count[0] > 0) && taskData->outputs_count[0] == 1;
+}
+
+bool TestTaskSequential::run() {
+  internal_order_test();
+  word_count_ = count_words(input_);
+  return true;
+}
+
+bool TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = word_count_;
+  return true;
+}
+
+bool TestTaskSequential::is_word_character(char c) {
+  return std::isalpha(static_cast<unsigned char>(c)) != 0 || c == '\'';
+}
+
+int TestTaskSequential::count_words(const std::string& text) {
+  int count = 0;
+  bool in_word = false;
+
+  for (size_t i = 0; i < text.length(); ++i) {
+    char c = text[i];
+
+    if (is_word_character(c)) {
+      if (!in_word) {
+        count++;
+        in_word = true;
+      }
+    } else {
+      in_word = false;
+    }
+
+    if (c == '\'' && i > 0 && i < text.length() - 1) {
+      if ((std::isalpha(text[i - 1]) != 0) && (std::isalpha(text[i + 1]) != 0)) {
+        in_word = true;
+      }
+    }
+
+    // std::cout << "Character: " << c << ", in_word: " << in_word << ", count: " << count << std::endl;
+  }
+
+  // std::cout << "Final word count: " << count << std::endl;
+  return count;
+}
+
+}  // namespace burykin_m_word_count

From e34c5e5e3b0b08f7bbf43d6553cd7ee56696b824 Mon Sep 17 00:00:00 2001
From: Mike Ivanov <45334246+misha-ivanov@users.noreply.github.com>
Date: Tue, 5 Nov 2024 03:29:00 +0300
Subject: [PATCH 107/155] =?UTF-8?q?=D0=98=D0=B2=D0=B0=D0=BD=D0=BE=D0=B2=20?=
 =?UTF-8?q?=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20=D0=97=D0=B0=D0=B4?=
 =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?=
 =?UTF-8?q?=D1=82=2020.=20=D0=98=D0=BD=D1=82=D0=B5=D0=B3=D1=80=D0=B8=D1=80?=
 =?UTF-8?q?=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20=E2=80=93=20=D0=BC=D0=B5?=
 =?UTF-8?q?=D1=82=D0=BE=D0=B4=20=D1=82=D1=80=D0=B0=D0=BF=D0=B5=D1=86=D0=B8?=
 =?UTF-8?q?=D0=B9.=20(#162)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**The trapezoidal integration algorithm consists of:**
1) Splitting the integration interval into N equal parts
2) Evaluating the function at the start and end points of the i-th part of the interval
3) Computing the area of the trapezoid formed by the four points obtained in steps 1 and 2
4) Summing the trapezoid areas

**Sequential version:** find the length of the i-th subinterval (`step`), walk sequentially over every subinterval of length `step` within the given integration range (from a to b), and accumulate the trapezoid areas. Since the area of the i-th trapezoid equals `(h_i + h_(i + 1)) / 2 * step`, the operations (`/ 2 * step`) can be hoisted out of the loop that sums the trapezoid areas.

**Parallel version:** process 0 reads the input parameters and computes `step`, then the values a, b, n and step are passed to all other processes via `broadcast`. In a loop, each process accumulates the sum of the bases of the i-th trapezoid, where `i = proc_rank + j * communicator_size`, with `j` starting at 0 and incremented every iteration. After the loop, the partial results are combined via `reduce` with the `std::plus<>()` operator into the total sum `result`, to which the operations (`/ 2 * step`) are applied. A minimal sketch of this scheme is given below.
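To make the index striding and the final reduction concrete, here is a minimal, self-contained sketch of the parallel scheme. The integrand `f` and the inputs a, b, n are hard-coded placeholders; in the real task they come from TaskData:

```cpp
#include <boost/mpi.hpp>
#include <functional>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Placeholder integrand and inputs (the task reads these from TaskData).
  std::function<double(double)> f = [](double x) { return x * x; };
  double a = 0.0;
  double b = 1.0;
  int n = 10000;
  double step = 0.0;

  // Rank 0 computes the step, then a, b, n, step go to every process.
  if (world.rank() == 0) step = (b - a) / n;
  boost::mpi::broadcast(world, a, 0);
  boost::mpi::broadcast(world, b, 0);
  boost::mpi::broadcast(world, n, 0);
  boost::mpi::broadcast(world, step, 0);

  // Each rank sums the two bases of every trapezoid with index
  // i = rank, rank + size, rank + 2 * size, ...
  double local = 0.0;
  for (int i = world.rank(); i < n; i += world.size()) {
    local += f(a + i * step) + f(a + (i + 1) * step);
  }

  // Combine the partial sums on rank 0, then apply the hoisted "/ 2 * step".
  double result = 0.0;
  boost::mpi::reduce(world, local, result, std::plus<>(), 0);
  if (world.rank() == 0) std::cout << result / 2 * step << "\n";  // ~1/3 for x^2 on [0, 1]
  return 0;
}
```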
---------

Signed-off-by: Mike Ivanov
Signed-off-by: Mike Ivanov <--global>
Co-authored-by: Mike Ivanov
Co-authored-by: Mike Ivanov <--global>
---
 .../func_tests/main.cpp | 571 ++++++++++++++++++
 .../include/ops_mpi.hpp |  48 ++
 .../perf_tests/main.cpp | 104 ++++
 .../src/ops_mpi.cpp     |  95 +++
 .../func_tests/main.cpp | 361 +++++++++++
 .../include/ops_seq.hpp |  27 +
 .../perf_tests/main.cpp |  92 +++
 .../src/ops_seq.cpp     |  39 ++
 8 files changed, 1337 insertions(+)
 create mode 100644 tasks/mpi/ivanov_m_integration_trapezoid/func_tests/main.cpp
 create mode 100644 tasks/mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/ivanov_m_integration_trapezoid/perf_tests/main.cpp
 create mode 100644 tasks/mpi/ivanov_m_integration_trapezoid/src/ops_mpi.cpp
 create mode 100644 tasks/seq/ivanov_m_integration_trapezoid/func_tests/main.cpp
 create mode 100644 tasks/seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp
 create mode 100644 tasks/seq/ivanov_m_integration_trapezoid/perf_tests/main.cpp
 create mode 100644 tasks/seq/ivanov_m_integration_trapezoid/src/ops_seq.cpp

diff --git a/tasks/mpi/ivanov_m_integration_trapezoid/func_tests/main.cpp b/tasks/mpi/ivanov_m_integration_trapezoid/func_tests/main.cpp
new file mode 100644
index 00000000000..8cba7ef658c
--- /dev/null
+++ b/tasks/mpi/ivanov_m_integration_trapezoid/func_tests/main.cpp
@@ -0,0 +1,571 @@
+// Copyright 2024 Ivanov Mike
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp"
+
+TEST(ivanov_m_integration_trapezoid_mpi_func_test, simple_parabola) {
+  boost::mpi::communicator world;
+  double a = 0;
+  double b = 1;
+  int n = 10000;
+
+  // Create function y = x^2
+  std::function<double(double)> _f = [](double x) { return x * x; };
+
+  std::vector<double> global_vec = {a, b, static_cast<double>(n)};
+  std::vector<double> global_result(1, 0.0);
+  std::vector<double> reference_result(1, 0.0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
+    taskDataPar->outputs_count.emplace_back(global_result.size());
+  }
+
+  ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  testMpiTaskParallel.add_function(_f);
+
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_result.data()));
+
taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, simple_parabola_swapped_borders) { + boost::mpi::communicator world; + double a = 1; + double b = 0; + int n = 10000; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, line_function) { + boost::mpi::communicator world; + double a = 0; + double b = 5; + int n = 10000; + + // Create function y = 2*(x-1) + std::function _f = [](double x) { return 2 * (x - 1); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + 
taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, sinus) { + boost::mpi::communicator world; + double a = 0; + auto b = static_cast(std::numbers::pi); + int n = 10000; + + // Create function y = sin(x) + std::function _f = [](double x) { return sin(x); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, sqrt) { + boost::mpi::communicator world; + double a = 0; + double b = 4; + int n = 10000; + + // Create function y = sqrt(x) + std::function _f = [](double x) { return sqrt(x); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create 
TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, simple_ln) { + boost::mpi::communicator world; + double a = 1; + auto b = static_cast(std::numbers::e); + int n = 10000; + + // Create function y = ln(x) + std::function _f = [](double x) { return log(x); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, simple_ln_with_right_border_sqr_exp) { + boost::mpi::communicator world; + double a = 1; + auto b = static_cast(std::numbers::e * std::numbers::e); + int n = 10000; + + // Create function y = ln(x) + std::function _f = [](double x) { return log(x); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + 
ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, equal_borders) { + boost::mpi::communicator world; + double a = 1; + double b = 1; + int n = 10000; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, parabola_with_large_result) { + boost::mpi::communicator world; + double a = 0; + double b = 100; + int n = 100000; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + 
ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, cosinus_result_equals_zero) { + boost::mpi::communicator world; + double a = 0; + auto b = static_cast(std::numbers::pi); + int n = 10000; + + // Create function y = cos(x) + std::function _f = [](double x) { return cos(x); }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_mpi_func_test, cosinus_result_less_than_zero) { + boost::mpi::communicator world; + double a = 0; + auto b = static_cast(std::numbers::pi); + int n = 10000; + + // Create function y = cos(x) - 1 + std::function _f = [](double x) { return cos(x) - 1; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + std::vector reference_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + 
taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.add_function(_f); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + // Create Task + ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + testMpiTaskSequential.add_function(_f); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + } + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); +} \ No newline at end of file diff --git a/tasks/mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp b/tasks/mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp new file mode 100644 index 00000000000..390c5d7de8a --- /dev/null +++ b/tasks/mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2024 Ivanov Mike +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace ivanov_m_integration_trapezoid_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + void add_function(const ::std::function& f); + + private: + double a_{}, b_{}; + int n_{}; + double result_{}; + std::function f_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void add_function(const ::std::function& f); + + private: + double a_{}, b_{}, result_{}; + int n_{}; + std::function f_; + boost::mpi::communicator world; +}; + +} // namespace ivanov_m_integration_trapezoid_mpi \ No newline at end of file diff --git a/tasks/mpi/ivanov_m_integration_trapezoid/perf_tests/main.cpp b/tasks/mpi/ivanov_m_integration_trapezoid/perf_tests/main.cpp new file mode 100644 index 00000000000..b20f3fe044f --- /dev/null +++ b/tasks/mpi/ivanov_m_integration_trapezoid/perf_tests/main.cpp @@ -0,0 +1,104 @@ +// Copyright 2024 Ivanov Mike +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp" + +TEST(ivanov_m_integration_trapezoid_mpi_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + double a = 0; + double b = 1; + int n = 1000; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + + // Create TaskData + std::shared_ptr 
taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->add_function(_f); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double result = 1.0 / 3.0; + ASSERT_NEAR(result, global_result[0], 1e-3); + } +} + +TEST(ivanov_m_integration_trapezoid_mpi_perf_test, test_task_run) { + boost::mpi::communicator world; + double a = 0; + double b = 1; + int n = 1000; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + std::vector global_vec = {a, b, static_cast(n)}; + std::vector global_result(1, 0.0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->add_function(_f); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double result = 1.0 / 3.0; + ASSERT_NEAR(result, global_result[0], 1e-3); + } +} \ No newline at end of file diff --git a/tasks/mpi/ivanov_m_integration_trapezoid/src/ops_mpi.cpp b/tasks/mpi/ivanov_m_integration_trapezoid/src/ops_mpi.cpp new file mode 100644 index 00000000000..4be3307f50e --- /dev/null +++ b/tasks/mpi/ivanov_m_integration_trapezoid/src/ops_mpi.cpp @@ -0,0 +1,95 @@ +// Copyright 2024 Ivanov Mike +#include "mpi/ivanov_m_integration_trapezoid/include/ops_mpi.hpp" + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + auto* input = reinterpret_cast(taskData->inputs[0]); + a_ = input[0]; + b_ = input[1]; + n_ = static_cast(input[2]); + result_ = 0.0; + + return true; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // 
Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential::run() { + internal_order_test(); + if (a_ == b_) return true; + double step_ = (b_ - a_) / n_; + for (int i = 0; i < n_; i++) result_ += (f_(a_ + i * step_) + f_(a_ + (i + 1) * step_)); + result_ = result_ / 2 * step_; + return true; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; +} + +void ivanov_m_integration_trapezoid_mpi::TestMPITaskSequential::add_function(const std::function& f) { + f_ = f; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + auto* input = reinterpret_cast(taskData->inputs[0]); + a_ = input[0]; + b_ = input[1]; + n_ = static_cast(input[2]); + result_ = 0.0; + } + return true; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int rank = world.rank(); + int size = world.size(); + double step; + double local_result = 0.0; + if (rank == 0) { + step = (b_ - a_) / n_; + } + broadcast(world, a_, 0); + broadcast(world, b_, 0); + broadcast(world, n_, 0); + broadcast(world, step, 0); + + if (a_ == b_) return true; + + for (int i = rank; i < n_; i += size) local_result += (f_(a_ + i * step) + f_(a_ + (i + 1) * step)); + reduce(world, local_result, result_, std::plus<>(), 0); + + if (rank == 0) result_ = result_ / 2 * step; + + return true; +} + +bool ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result_; + } + return true; +} + +void ivanov_m_integration_trapezoid_mpi::TestMPITaskParallel::add_function(const std::function& f) { + f_ = f; +} \ No newline at end of file diff --git a/tasks/seq/ivanov_m_integration_trapezoid/func_tests/main.cpp b/tasks/seq/ivanov_m_integration_trapezoid/func_tests/main.cpp new file mode 100644 index 00000000000..ad30b48b927 --- /dev/null +++ b/tasks/seq/ivanov_m_integration_trapezoid/func_tests/main.cpp @@ -0,0 +1,361 @@ +// Copyright 2024 Ivanov Mike +#include + +#include +#include +#include +#include + +#include "seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp" + +TEST(ivanov_m_integration_trapezoid_seq_func_test, simple_parabola) { + const double a = 0; + const double b = 1; + const int n = 1000; + const double res = 1.0 / 3.0; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); 
+ testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, simple_parabola_swapped_borders) { + const double a = 1; + const double b = 0; + const int n = 1000; + const double res = -1.0 / 3.0; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, line_function) { + const double a = 0; + const double b = 5; + const int n = 1000; + const double res = 15.0; + + // Create function y = 2*(x-1) + std::function _f = [](double x) { return 2 * (x - 1); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, sinus) { + const double a = 0; + const auto b = static_cast(std::numbers::pi); + const int n = 1000; + const double res = 2.0; + + // Create function y = sin(x) + std::function _f = [](double x) { return sin(x); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, sqrt) { + const double a = 0; + const double b = 4; + const int n = 1000; + const double res = 16.0 / 3.0; + + // Create function + std::function _f = [](double x) { return sqrt(x); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
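// The in vector already holds {a, b, n}; register it as the task's single input buffer. +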
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, simple_ln) { + const double a = 1; + const auto b = static_cast(std::numbers::e); + const int n = 1000; + const double res = 1.0; + + // Create function + std::function _f = [](double x) { return log(x); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, ln_with_sqr_border_exp) { + const double a = 1; + const auto b = static_cast(std::numbers::e * std::numbers::e); + const int n = 1000; + const double res = static_cast(std::numbers::e * std::numbers::e) + 1.0; + + // Create function + std::function _f = [](double x) { return log(x); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, equal_borders) { + const double a = 1; + const double b = 1; + const int n = 1000; + const double res = 0.0; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + 
testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, parabola_with_large_result) { + const double a = 0; + const double b = 100; + const int n = 100000; + const double res = 1000000.0 / 3.0; + + // Create function y = x^2 + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, cosinus_result_equals_zero) { + const double a = 0; + const auto b = static_cast(std::numbers::pi); + const int n = 10000; + const double res = 0.0; + + // Create function y = cos(x) + std::function _f = [](double x) { return cos(x); }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} + +TEST(ivanov_m_integration_trapezoid_seq_func_test, cosinus_result_less_than_zero) { + const double a = 0; + const auto b = static_cast(std::numbers::pi); + const int n = 10000; + const double res = -std::numbers::pi; + + // Create function y = cos(x) - 1; its integral over [0, pi] is -pi + std::function _f = [](double x) { return cos(x) - 1; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + + ivanov_m_integration_trapezoid_seq::TestTaskSequential testTaskSequential(taskDataSeq); + testTaskSequential.add_function(_f); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(res, out[0], 1e-3); +} \ No newline at end of file diff --git a/tasks/seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp b/tasks/seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp new file mode 100644 index 00000000000..cfd1a5e742d --- /dev/null +++ b/tasks/seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp 
@@ -0,0 +1,27 @@ +// Copyright 2024 Ivanov Mike +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace ivanov_m_integration_trapezoid_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + void add_function(const ::std::function& f); + + private: + double a_{}, b_{}, result_{}; + int n_{}; + std::function f_; +}; + +} // namespace ivanov_m_integration_trapezoid_seq \ No newline at end of file diff --git a/tasks/seq/ivanov_m_integration_trapezoid/perf_tests/main.cpp b/tasks/seq/ivanov_m_integration_trapezoid/perf_tests/main.cpp new file mode 100644 index 00000000000..d201c67a1f7 --- /dev/null +++ b/tasks/seq/ivanov_m_integration_trapezoid/perf_tests/main.cpp @@ -0,0 +1,92 @@ +// Copyright 2024 Ivanov Mike +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp" + +TEST(ivanov_m_integration_trapezoid_seq_perf_test, test_pipeline_run) { + const double a = 0.0; + const double b = 1.0; + const int n = 100; + + // Create function + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + testTaskSequential->add_function(_f); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(ivanov_m_integration_trapezoid_seq_perf_test, test_task_run) { + const double a = 0; + const double b = 1; + const int n = 100; + + // Create function + std::function _f = [](double x) { return x * x; }; + + // Create data + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + testTaskSequential->add_function(_f); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = 
std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/seq/ivanov_m_integration_trapezoid/src/ops_seq.cpp b/tasks/seq/ivanov_m_integration_trapezoid/src/ops_seq.cpp new file mode 100644 index 00000000000..132d4482fff --- /dev/null +++ b/tasks/seq/ivanov_m_integration_trapezoid/src/ops_seq.cpp @@ -0,0 +1,39 @@ +// Copyright 2024 Ivanov Mike +#include "seq/ivanov_m_integration_trapezoid/include/ops_seq.hpp" + +bool ivanov_m_integration_trapezoid_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check the element counts of the input and output buffers + return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1; +} + +bool ivanov_m_integration_trapezoid_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Read a, b and n from the input buffer and reset the result + auto* input = reinterpret_cast(taskData->inputs[0]); + a_ = input[0]; + b_ = input[1]; + n_ = static_cast(input[2]); + result_ = 0.0; + + return true; +} + +bool ivanov_m_integration_trapezoid_seq::TestTaskSequential::run() { + internal_order_test(); + if (a_ == b_) return true; + double step_ = (b_ - a_) / n_; + for (int i = 0; i < n_; i++) result_ += (f_(a_ + i * step_) + f_(a_ + (i + 1) * step_)); + result_ = result_ / 2 * step_; + return true; +} + +bool ivanov_m_integration_trapezoid_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; +} + +void ivanov_m_integration_trapezoid_seq::TestTaskSequential::add_function(const std::function& f) { + f_ = f; +} \ No newline at end of file From 14c4c82cfe2407c92d623cdeab01d96ccd9d5077 Mon Sep 17 00:00:00 2001 From: KoshkinMatvey <132918746+KoshkinMatvey@users.noreply.github.com> Date: Tue, 5 Nov 2024 03:33:50 +0300 Subject: [PATCH 108/155] Koshkin Matvey. Task 1. Variant 9. Scalar product of vectors (#141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

Description of the sequential task: perform element-wise multiplication of two vectors; each element of the first vector is multiplied by the corresponding element of the second, and the resulting products are summed, yielding the value of the dot product.

Description of the MPI task: a two-dimensional integer vector holds the two input vectors. These vectors are split into equal parts according to the number of processes. Each process receives its portion of the data and multiplies the corresponding elements of the two vectors element-wise. The products in each process are then summed, and the partial results are sent to process zero, where they are added together to obtain the final dot product.
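For reference, the partition/multiply/reduce scheme described above fits in a short free-standing Boost.MPI program. This sketch is illustrative only and is not part of the patch: the demo length and the constant fill values are invented, and std::inner_product stands in for the task's hand-written accumulation loop.

#include <algorithm>   // std::copy
#include <boost/mpi.hpp>
#include <iostream>
#include <numeric>     // std::inner_product
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  const int n = 16;  // demo length only; the real tests go up to 10'000'000 elements

  // Equal base share per process, with the first n % size ranks taking one extra
  // element, the same counts_ layout the task builds in pre_processing().
  std::vector<int> counts(world.size(), n / world.size());
  for (int i = 0; i < n % world.size(); ++i) counts[i]++;

  std::vector<int> local_a(counts[world.rank()]);
  std::vector<int> local_b(counts[world.rank()]);

  if (world.rank() == 0) {
    std::vector<int> a(n, 2);  // stand-in data; the tests use random vectors
    std::vector<int> b(n, 3);
    int offset = counts[0];
    for (int p = 1; p < world.size(); ++p) {  // ship each rank its slice of both inputs
      world.send(p, 0, a.data() + offset, counts[p]);
      world.send(p, 1, b.data() + offset, counts[p]);
      offset += counts[p];
    }
    std::copy(a.begin(), a.begin() + counts[0], local_a.begin());
    std::copy(b.begin(), b.begin() + counts[0], local_b.begin());
  } else {
    world.recv(0, 0, local_a.data(), counts[world.rank()]);
    world.recv(0, 1, local_b.data(), counts[world.rank()]);
  }

  // Multiply matching elements and sum locally, then plus-reduce onto rank 0.
  int local = std::inner_product(local_a.begin(), local_a.end(), local_b.begin(), 0);
  int total = 0;
  boost::mpi::reduce(world, local, total, std::plus<>(), 0);
  if (world.rank() == 0) std::cout << "dot = " << total << "\n";  // 16 * 2 * 3 = 96
  return 0;
}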
--- .../func_tests/main.cpp | 395 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 +++ .../perf_tests/main.cpp | 109 +++++ .../src/ops_mpi.cpp | 124 ++++++ .../func_tests/main.cpp | 152 +++++++ .../include/ops_seq.hpp | 26 ++ .../perf_tests/main.cpp | 101 +++++ .../src/ops_seq.cpp | 37 ++ 8 files changed, 992 insertions(+) create mode 100644 tasks/mpi/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp create mode 100644 tasks/mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp create mode 100644 tasks/mpi/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp create mode 100644 tasks/mpi/koshkin_m_scalar_product_of_vectors/src/ops_mpi.cpp create mode 100644 tasks/seq/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp create mode 100644 tasks/seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp create mode 100644 tasks/seq/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp create mode 100644 tasks/seq/koshkin_m_scalar_product_of_vectors/src/ops_seq.cpp diff --git a/tasks/mpi/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp b/tasks/mpi/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp new file mode 100644 index 00000000000..942eddcab0e --- /dev/null +++ b/tasks/mpi/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp @@ -0,0 +1,395 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp" + +static int offset = 0; + +int koshkin_m_scalar_product_of_vectors::calculateDotProduct(const std::vector& vec_1, + const std::vector& vec_2) { + long result = 0; + for (size_t i = 0; i < vec_1.size(); i++) result += vec_1[i] * vec_2[i]; + return result; +} + +std::vector koshkin_m_scalar_product_of_vectors::generateRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(koshkin_m_scalar_product_of_vectors, check_vec_equal) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_vec_no_equal) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + std::vector
vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector + 10); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, multiply_vec_size_100) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + koshkin_m_scalar_product_of_vectors::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_vectorDotProduct_empty) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = {}; + std::vector vec_2 = {}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel 
testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, multiply_vec_size_300) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 300; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + koshkin_m_scalar_product_of_vectors::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, multiply_vec_size_600) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 600; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size_vector); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + 
testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + koshkin_m_scalar_product_of_vectors::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_vectorDotProduct_binary) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = {1, 2}; + std::vector vec_2 = {4, 7}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_vectorDotProduct_ternary) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = {1, 2, 5}; + std::vector vec_2 = {4, 7, 8}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_run_size_4) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = {1, 2, 5, 6}; + std::vector vec_2 = {4, 7, 8, 9}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {vec_1, vec_2}; + for 
(size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_run_size_7) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = {1, 2, 5, 6, 9, 13, 5}; + std::vector vec_2 = {4, 7, 8, 9, 8, 4, 17}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2), res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, check_mpi_run_random_size) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + std::srand(static_cast(std::time(nullptr))); + int random_size = 1 + std::rand() % 100; + + if (world.rank() == 0) { + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(random_size); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(random_size); + + global_vec = {vec_1, vec_2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + koshkin_m_scalar_product_of_vectors::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp b/tasks/mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp new file mode 100644 index 00000000000..4f395f43940 --- /dev/null +++ b/tasks/mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp @@ 
-0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace koshkin_m_scalar_product_of_vectors { +std::vector generateRandomVector(int v_size); +int calculateDotProduct(const std::vector& vec_1, const std::vector& vec_2); +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_{}; + std::vector local_input1_{}, local_input2_{}; + std::vector counts_{}; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace koshkin_m_scalar_product_of_vectors \ No newline at end of file diff --git a/tasks/mpi/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp b/tasks/mpi/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp new file mode 100644 index 00000000000..6598e2e2b20 --- /dev/null +++ b/tasks/mpi/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp @@ -0,0 +1,109 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp" + +static int offset = 0; + +int koshkin_m_scalar_product_of_vectors::calculateDotProduct(const std::vector& vec_1, + const std::vector& vec_2) { + long result = 0; + for (size_t i = 0; i < vec_1.size(); i++) result += vec_1[i] * vec_2[i]; + return result; +} + +std::vector koshkin_m_scalar_product_of_vectors::generateRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(koshkin_m_scalar_product_of_vectors, test_pipeline_run) { + int count_size = 10000000; + boost::mpi::communicator world; + std::vector> global_vec; + + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size); + + std::vector res(1, 0); + global_vec = {vec_1, vec_2}; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + 
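// Reference answer computed directly on the full vectors; checked on rank 0 after the perf run. +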
int ans = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, res[0]); + } +} + +TEST(koshkin_m_scalar_product_of_vectors, test_task_run) { + int count_size = 10000000; + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count_size); + std::shared_ptr taskDataPar = std::make_shared(); + global_vec = {vec_1, vec_2}; + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(koshkin_m_scalar_product_of_vectors::calculateDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/koshkin_m_scalar_product_of_vectors/src/ops_mpi.cpp b/tasks/mpi/koshkin_m_scalar_product_of_vectors/src/ops_mpi.cpp new file mode 100644 index 00000000000..0ba63fb6710 --- /dev/null +++ b/tasks/mpi/koshkin_m_scalar_product_of_vectors/src/ops_mpi.cpp @@ -0,0 +1,124 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/koshkin_m_scalar_product_of_vectors/include/ops_mpi.hpp" + +#include +#include +#include +#include + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res = 0; + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskSequential::validation() { + internal_order_test(); + return (taskData->inputs.size() == 2 && taskData->inputs.size() == taskData->inputs_count.size() && + taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs.size() == 1 && + taskData->outputs.size() == taskData->outputs_count.size() && taskData->outputs_count[0] == 1); +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + res += input_[0][i] * input_[1][i]; + } + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskSequential::post_processing() { + 
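// Write the accumulated dot product into the caller-provided output buffer. +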
internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskParallel::pre_processing() { + internal_order_test(); + size_t total_el = 0; + size_t base_el = 0; + size_t extra_el = 0; + if (world.rank() == 0) { + total_el = taskData->inputs_count[0]; + base_el = total_el / world.size(); + extra_el = total_el % world.size(); + } + counts_.resize(world.size()); + + if (world.rank() == 0) { + counts_.assign(world.size(), base_el); + std::fill(counts_.begin(), counts_.begin() + extra_el, base_el + 1); + } + boost::mpi::broadcast(world, counts_.data(), world.size(), 0); + + if (world.rank() == 0) { + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + } + res = 0; + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return (taskData->inputs.size() == 2 && taskData->inputs.size() == taskData->inputs_count.size() && + taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs.size() == 1 && + taskData->outputs.size() == taskData->outputs_count.size() && taskData->outputs_count[0] == 1); + } + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskParallel::run() { + internal_order_test(); + if (world.rank() == 0) { + size_t offset_extra = counts_[0]; + for (int proces = 1; proces < world.size(); proces++) { + size_t cur_accout = counts_[proces]; + world.send(proces, 0, input_[0].data() + offset_extra, cur_accout); + world.send(proces, 1, input_[1].data() + offset_extra, cur_accout); + offset_extra += cur_accout; + } + } + + local_input1_ = std::vector(counts_[world.rank()]); + local_input2_ = std::vector(counts_[world.rank()]); + + if (world.rank() > 0) { + world.recv(0, 0, local_input1_.data(), counts_[world.rank()]); + world.recv(0, 1, local_input2_.data(), counts_[world.rank()]); + } else { + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + counts_[0]); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + counts_[0]); + } + + int local_res = 0; + + for (size_t i = 0; i < local_input1_.size(); i++) { + local_res += local_input1_[i] * local_input2_[i]; + } + boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); + return true; +} + +bool koshkin_m_scalar_product_of_vectors::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp b/tasks/seq/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp new file mode 100644 index 00000000000..8bd0ea007bc --- /dev/null +++ b/tasks/seq/koshkin_m_scalar_product_of_vectors/func_tests/main.cpp @@ -0,0 +1,152 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp" + +static int offset = 0; + +int koshkin_m_scalar_product_of_vectors::calculateDotProduct(const std::vector &vec_1, + const std::vector &vec_2) { + long result = 0; + for (size_t i = 0; i < vec_1.size(); i++) result += vec_1[i] * vec_2[i]; + return result; +} + +int 
koshkin_m_scalar_product_of_vectors::generateRandomNumber(int min, int max) { + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> distrib(min, max); + return distrib(gen); +} + +std::vector koshkin_m_scalar_product_of_vectors::generateRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(koshkin_m_scalar_product_of_vectors, check_empty_func_vector_product) { + const int count = 0; + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + int answer = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + ASSERT_EQ(0, answer); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_scalary_product_of_vectors_size_20) { + const int count = 20; + + std::vector out(1, 0); + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_2.data())); + + taskDataSeq->inputs_count.emplace_back(vec_1.size()); + taskDataSeq->inputs_count.emplace_back(vec_2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + koshkin_m_scalar_product_of_vectors::VectorDotProduct VectorDotProduct(taskDataSeq); + ASSERT_EQ(VectorDotProduct.validation(), true); + VectorDotProduct.pre_processing(); + VectorDotProduct.run(); + VectorDotProduct.post_processing(); + int answer = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + ASSERT_EQ(answer, out[0]); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_scalary_product_of_vectors_size_300) { + const int count = 300; + + std::vector out(1, 0); + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_2.data())); + + taskDataSeq->inputs_count.emplace_back(vec_1.size()); + taskDataSeq->inputs_count.emplace_back(vec_2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + koshkin_m_scalar_product_of_vectors::VectorDotProduct VectorDotProduct(taskDataSeq); + ASSERT_EQ(VectorDotProduct.validation(), true); + VectorDotProduct.pre_processing(); + VectorDotProduct.run(); + VectorDotProduct.post_processing(); + int answer = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + ASSERT_EQ(answer, out[0]); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_run_correct_binary_sys) { + std::vector out(1, 0); + + std::vector vec_1 = {5, 2}; + std::vector vec_2 = {6, 10}; + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_2.data())); + + taskDataSeq->inputs_count.emplace_back(vec_1.size()); + taskDataSeq->inputs_count.emplace_back(vec_2.size()); + + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + koshkin_m_scalar_product_of_vectors::VectorDotProduct VectorDotProduct(taskDataSeq); + ASSERT_EQ(VectorDotProduct.validation(), true); + VectorDotProduct.pre_processing(); + VectorDotProduct.run(); + VectorDotProduct.post_processing(); + ASSERT_EQ(50, out[0]); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_calculateDotProduct_right_binary_sys) { + std::vector vec_1 = {5, 2}; + std::vector vec_2 = {7, 3}; + ASSERT_EQ(41, koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2)); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_calculateDotProduct_right_ternary_sys) { + std::vector vec_1 = {5, 2, 10}; + std::vector vec_2 = {7, 3, 1}; + ASSERT_EQ(51, koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2)); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_calculateDotProduct_binary_neg) { + std::vector vec_1 = {-1, -8}; + std::vector vec_2 = {-5, 7}; + ASSERT_EQ(-51, koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2)); +} + +TEST(koshkin_m_scalar_product_of_vectors, check_calculateDotProduct_random_size) { + int size = koshkin_m_scalar_product_of_vectors::generateRandomNumber(1, 100); + std::vector vec_1(size); + std::vector vec_2(size); + for (int i = 0; i < size; ++i) { + vec_1[i] = koshkin_m_scalar_product_of_vectors::generateRandomNumber(-10, 10); + vec_2[i] = koshkin_m_scalar_product_of_vectors::generateRandomNumber(-10, 10); + } + + long expected_result = 0; + for (int i = 0; i < size; ++i) { + expected_result += vec_1[i] * vec_2[i]; + } + + ASSERT_EQ(expected_result, koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2)); +} \ No newline at end of file diff --git a/tasks/seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp b/tasks/seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp new file mode 100644 index 00000000000..1908923ad8b --- /dev/null +++ b/tasks/seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace koshkin_m_scalar_product_of_vectors { +std::vector generateRandomVector(int v_size); +int generateRandomNumber(int min, int max); +int calculateDotProduct(const std::vector& vec_1, const std::vector& vec_2); +class VectorDotProduct : public ppc::core::Task { + public: + explicit VectorDotProduct(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; + std::vector> input_; +}; + +} // namespace koshkin_m_scalar_product_of_vectors \ No newline at end of file diff --git a/tasks/seq/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp b/tasks/seq/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp new file mode 100644 index 00000000000..80557adf9c9 --- /dev/null +++ b/tasks/seq/koshkin_m_scalar_product_of_vectors/perf_tests/main.cpp @@ -0,0 +1,101 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp" + +static int offset = 0; + +int koshkin_m_scalar_product_of_vectors::calculateDotProduct(const std::vector &vec_1, + const std::vector &vec_2) { + long result = 0; + for (size_t i = 0; i < vec_1.size(); i++) result += vec_1[i] * vec_2[i]; + 
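// Accumulates into a long; note the implicit narrowing conversion back to int on return. +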
return result; +} + +std::vector koshkin_m_scalar_product_of_vectors::generateRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(koshkin_m_scalar_product_of_vectors, test_pipeline_run) { + const int count = 22800000; + std::vector out(1, 0); + + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_2.data())); + + taskDataSeq->inputs_count.emplace_back(vec_1.size()); + taskDataSeq->inputs_count.emplace_back(vec_2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + ASSERT_EQ(answer, out[0]); +} + +TEST(koshkin_m_scalar_product_of_vectors, test_task_run) { + const int count = 22800000; + std::vector out(1, 0); + + std::vector vec_1 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + std::vector vec_2 = koshkin_m_scalar_product_of_vectors::generateRandomVector(count); + + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vec_2.data())); + + taskDataSeq->inputs_count.emplace_back(vec_1.size()); + taskDataSeq->inputs_count.emplace_back(vec_2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = koshkin_m_scalar_product_of_vectors::calculateDotProduct(vec_1, vec_2); + ASSERT_EQ(answer, out[0]); +} diff --git a/tasks/seq/koshkin_m_scalar_product_of_vectors/src/ops_seq.cpp b/tasks/seq/koshkin_m_scalar_product_of_vectors/src/ops_seq.cpp new file mode 100644 index 00000000000..a8d9a209d17 --- /dev/null +++ b/tasks/seq/koshkin_m_scalar_product_of_vectors/src/ops_seq.cpp @@ -0,0 +1,37 @@ +// 
Copyright 2024 Nesterov Alexander +#include "seq/koshkin_m_scalar_product_of_vectors/include/ops_seq.hpp" + +bool koshkin_m_scalar_product_of_vectors::VectorDotProduct::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res = 0; + return true; +} + +bool koshkin_m_scalar_product_of_vectors::VectorDotProduct::validation() { + internal_order_test(); + return (taskData->inputs.size() == 2 && taskData->inputs.size() == taskData->inputs_count.size() && + taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs.size() == 1 && + taskData->outputs.size() == taskData->outputs_count.size() && taskData->outputs_count[0] == 1); +} + +bool koshkin_m_scalar_product_of_vectors::VectorDotProduct::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + res += input_[0][i] * input_[1][i]; + } + return true; +} + +bool koshkin_m_scalar_product_of_vectors::VectorDotProduct::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} \ No newline at end of file From 570178e72da795c085af61bb1681b4cfbb70acca Mon Sep 17 00:00:00 2001 From: Sean0450 <65349287+Sean0450@users.noreply.github.com> Date: Tue, 5 Nov 2024 03:36:33 +0300 Subject: [PATCH 109/155] Sadikov Ivan. Task 1. Variant 12. Sum of values by matrix columns (#135) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

Description of the sequential solution: the input data is transposed and written into the vector matrix. Looping over the number of columns, we walk through matrix and use std::accumulate to obtain the sum of each column. The results are written into the vector sum.

Description of the parallel solution: the input data is transposed and written into the vector matrix. We then compute delta, the number of columns assigned to each process. If the number of columns is not evenly divisible among the processes, the remaining columns are given to the last process. Each process walks over its columns with std::accumulate and writes its result into the vector local_input. The per-process vectors are later merged into one, which is returned as the answer.
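As a free-standing sketch of the sequential kernel just described (not the patch code: the helper name column_sums and the row-major layout of the flat input are assumptions made for illustration):

#include <cassert>
#include <numeric>
#include <vector>

// Sum each column of a rows x columns matrix stored as one flat row-major vector.
std::vector<int> column_sums(const std::vector<int>& flat, int rows, int columns) {
  assert(static_cast<int>(flat.size()) == rows * columns);
  // Transpose so that each original column becomes one contiguous block.
  std::vector<int> matrix(flat.size());
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < columns; ++c)
      matrix[c * rows + r] = flat[r * columns + c];
  // One std::accumulate per column, as the description outlines.
  std::vector<int> sum(columns, 0);
  for (int c = 0; c < columns; ++c)
    sum[c] = std::accumulate(matrix.begin() + c * rows, matrix.begin() + (c + 1) * rows, 0);
  return sum;
}

The parallel variant then deals out delta = columns / world.size() of these contiguous column blocks per process, with the last rank also absorbing the columns % world.size() remainder, and the per-process partial sums are merged on the root, which the tests below verify against the sequential task.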
--- .../func_tests/main.cpp | 436 ++++++++++++++++++ .../include/ops_mpi.h | 47 ++ .../perf_tests/main.cpp | 85 ++++ .../src/mpi_src.cpp | 152 ++++++ .../func_tests/seq_func_tests.cpp | 155 +++++++ .../include/sq_task.h | 29 ++ .../perf_tests/main.cpp | 65 +++ .../src/ops_seq.cpp | 61 +++ 8 files changed, 1030 insertions(+) create mode 100644 tasks/mpi/sadikov_I_sum_values_by_columns_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h create mode 100644 tasks/mpi/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/sadikov_I_sum_values_by_columns_matrix/src/mpi_src.cpp create mode 100644 tasks/seq/sadikov_I_sum_values_by_columns_matrix/func_tests/seq_func_tests.cpp create mode 100644 tasks/seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h create mode 100644 tasks/seq/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/sadikov_I_sum_values_by_columns_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/func_tests/main.cpp b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..4902da3b65a --- /dev/null +++ b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/func_tests/main.cpp @@ -0,0 +1,436 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h" + +std::vector GetRandomData(size_t size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = (gen() % 100) - 49; + } + return vec; +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, checkvalidation) { + const int columns = 15; + const int rows = 15; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out(columns, 0); + auto taskData = std::make_shared(); + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv(taskData); + ASSERT_EQ(sv.validation(), true); +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, checkvalidation2) { + boost::mpi::communicator world; + const int columns = 15; + const int rows = 15; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv(taskData); + ASSERT_EQ(sv.validation(), true); +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_square_matrix) { + boost::mpi::communicator world; + const int columns = 15; + const int rows = 15; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + 
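+    // Packing convention used throughout these tests: inputs[0] holds the
+    // flattened row-major matrix, inputs_count[0] the row count and
+    // inputs_count[1] the column count.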
taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_rect_matrix) { + boost::mpi::communicator world; + const int columns = 150; + const int rows = 15; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_square_matrix2) { + boost::mpi::communicator world; + const int columns = 105; + const int rows = 105; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + 
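+    // The sequential MPITask, run on rank 0 over the same input buffer,
+    // produces the reference result that the parallel output is checked
+    // against below.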
taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_square_matrix3) { + boost::mpi::communicator world; + const int columns = 333; + const int rows = 333; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_rect_matrix2) { + boost::mpi::communicator world; + const int columns = 67; + const int rows = 105; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_rect_matrix3) { + boost::mpi::communicator world; + const int columns = 
67; + const int rows = 105; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_one_row) { + boost::mpi::communicator world; + const int columns = 67; + const int rows = 1; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_one_column) { + boost::mpi::communicator world; + const int columns = 1; + const int rows = 130; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + 
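+  // Every rank constructs and runs the parallel task; only rank 0 filled
+  // taskData above, which is why MPITaskParallel::validation() checks the
+  // counts on rank 0 alone.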
ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_one_element) { + boost::mpi::communicator world; + const int columns = 1; + const int rows = 1; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, check_empty_matrix) { + boost::mpi::communicator world; + const int columns = 0; + const int rows = 0; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = GetRandomData(columns * rows); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel sv_par(taskData); + ASSERT_EQ(sv_par.validation(), true); + sv_par.pre_processing(); + sv_par.run(); + sv_par.post_processing(); + if (world.rank() == 0) { + std::vector out_seq(columns, 0); + auto taskData_seq = std::make_shared(); + taskData_seq->inputs.emplace_back(reinterpret_cast(in.data())); + taskData_seq->inputs_count.emplace_back(in_index[0]); + taskData_seq->inputs_count.emplace_back(in_index[1]); + taskData_seq->outputs.emplace_back(reinterpret_cast(out_seq.data())); + taskData_seq->outputs_count.emplace_back(out_seq.size()); + sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask sv_seq(taskData_seq); + 
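+    // rows == columns == 0 means every per-process delta is 0, so this test
+    // also exercises the delta == 0 fallback path in MPITaskParallel::run().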
ASSERT_EQ(sv_seq.validation(), true); + sv_seq.pre_processing(); + sv_seq.run(); + sv_seq.post_processing(); + ASSERT_EQ(out_seq, out_par); + } +} \ No newline at end of file diff --git a/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h new file mode 100644 index 00000000000..b3cdb649fa3 --- /dev/null +++ b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sadikov_I_Sum_values_by_columns_matrix_mpi { +class MPITask : public ppc::core::Task { + private: + std::vector sum; + std::vector matrix; + size_t rows_count, columns_count = 0; + + public: + explicit MPITask(std::shared_ptr td); + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + void calculate(size_t size); +}; + +class MPITaskParallel : public ppc::core::Task { + private: + std::vector sum; + std::vector matrix; + std::vector local_input; + size_t rows_count, columns_count = 0; + size_t last_column = 0; + size_t delta = 0; + boost::mpi::communicator world; + + public: + explicit MPITaskParallel(std::shared_ptr td); + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + std::vector calculate(size_t size); +}; +} // namespace sadikov_I_Sum_values_by_columns_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..c7ec8bf5d50 --- /dev/null +++ b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,85 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h" + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, mpi_pipline_run) { + boost::mpi::communicator world; + const int columns = 3000; + const int rows = 3000; + std::vector in; + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + std::vector answer(columns, columns); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + auto sv_par = std::make_shared(taskData); + ASSERT_EQ(sv_par->validation(), true); + sv_par->pre_processing(); + sv_par->run(); + sv_par->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sv_par); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, out_par); + } +} + +TEST(sadikov_I_Sum_values_by_columns_matrix_mpi, mpi_task_run) { + boost::mpi::communicator world; + const int columns = 3000; + const int rows = 3000; + std::vector 
in(columns * rows, 1); + std::vector in_index{rows, columns}; + std::vector out_par(columns, 0); + std::vector answer(columns, columns); + auto taskData = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(rows * columns, 1); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in_index[0]); + taskData->inputs_count.emplace_back(in_index[1]); + taskData->outputs.emplace_back(reinterpret_cast(out_par.data())); + taskData->outputs_count.emplace_back(out_par.size()); + } + auto sv_par = std::make_shared(taskData); + ASSERT_EQ(sv_par->validation(), true); + sv_par->pre_processing(); + sv_par->run(); + sv_par->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sv_par); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, out_par); + } +} \ No newline at end of file diff --git a/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/src/mpi_src.cpp b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/src/mpi_src.cpp new file mode 100644 index 00000000000..f66af6efaf0 --- /dev/null +++ b/tasks/mpi/sadikov_I_sum_values_by_columns_matrix/src/mpi_src.cpp @@ -0,0 +1,152 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "mpi/sadikov_I_sum_values_by_columns_matrix/include/ops_mpi.h" + +sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::MPITask(std::shared_ptr td) + : Task(std::move(td)) {} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::validation() { + internal_order_test(); + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::pre_processing() { + internal_order_test(); + rows_count = static_cast(taskData->inputs_count[0]); + columns_count = static_cast(taskData->inputs_count[1]); + auto *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + matrix.reserve(columns_count * rows_count); + for (size_t i = 0; i < columns_count; ++i) { + for (size_t j = 0; j < rows_count; ++j) { + matrix.emplace_back(tmp_ptr[j * columns_count + i]); + } + } + sum = std::vector(columns_count); + return true; +} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::run() { + internal_order_test(); + calculate(columns_count); + return true; +} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::post_processing() { + internal_order_test(); + for (size_t i = 0; i < columns_count; ++i) { + reinterpret_cast(taskData->outputs[0])[i] = sum[i]; + } + return true; +} + +void sadikov_I_Sum_values_by_columns_matrix_mpi::MPITask::calculate(size_t size) { + for (size_t i = 0; i < size; ++i) { + sum[i] = std::accumulate(matrix.begin() + i * rows_count, matrix.begin() + (i + 1) * rows_count, 0); + } +} + +sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::MPITaskParallel(std::shared_ptr td) + : Task(std::move(td)) {} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[1] == taskData->outputs_count[0]; + } + return true; +} + +bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::pre_processing() { + internal_order_test(); + if 
(world.rank() == 0) {
+    rows_count = static_cast<size_t>(taskData->inputs_count[0]);
+    columns_count = static_cast<size_t>(taskData->inputs_count[1]);
+    delta = columns_count / world.size();
+    last_column = columns_count % world.size();
+    matrix.reserve(columns_count * rows_count);
+    int *tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+    for (size_t i = 0; i < columns_count; ++i) {
+      for (size_t j = 0; j < rows_count; ++j) {
+        matrix.emplace_back(tmp_ptr[j * columns_count + i]);
+      }
+    }
+  }
+  return true;
+}
+
+bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::run() {
+  internal_order_test();
+  broadcast(world, rows_count, 0);
+  broadcast(world, columns_count, 0);
+  broadcast(world, delta, 0);
+  broadcast(world, last_column, 0);
+  if (world.rank() == 0) {
+    for (int proc = 1; proc < world.size(); proc++) {
+      if (proc != world.size() - 1 && delta != 0) {
+        world.send(proc, 0, matrix.data() + proc * rows_count * delta, rows_count * delta);
+      }
+      if (proc == world.size() - 1 && delta != 0) {
+        world.send(proc, 0, matrix.data() + proc * rows_count * (delta), rows_count * (delta + last_column));
+      }
+    }
+  }
+  if (delta != 0) {
+    local_input = (world.rank() != world.size() - 1) ? std::vector<int>(rows_count * delta)
+                                                     : std::vector<int>(rows_count * (delta + last_column));
+  } else {
+    local_input = std::vector<int>(matrix.begin(), matrix.end());
+  }
+  if (world.rank() == 0 && delta != 0) {
+    local_input = std::vector<int>(matrix.begin(), matrix.begin() + rows_count * delta);
+  } else if (world.rank() > 0 && delta != 0) {
+    world.recv(0, 0, local_input.data(),
+               (world.rank() != world.size() - 1) ? rows_count * delta : rows_count * (delta + last_column));
+  }
+  size_t size = delta != 0 ? local_input.size() / rows_count : local_input.size();
+  std::vector<int> intermediate_res;
+  if (delta != 0) {
+    intermediate_res = calculate(size);
+  }
+  if (world.rank() == 0 && delta == 0 && !matrix.empty()) {
+    intermediate_res.emplace_back(std::accumulate(local_input.begin(), local_input.end(), 0));
+  }
+  if (world.rank() == 0) {
+    std::vector<int> localRes(columns_count);
+    std::vector<int> sizes(world.size(), delta);
+    if (delta == 0 && !matrix.empty()) {
+      sizes.front() = 1;
+    } else if (delta != 0 && !matrix.empty()) {
+      sizes.back() = delta + last_column;
+    }
+    boost::mpi::gatherv(world, intermediate_res, localRes.data(), sizes, 0);
+    sum = localRes;
+  } else {
+    boost::mpi::gatherv(world, intermediate_res, 0);
+  }
+  return true;
+}
+
+bool sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    for (size_t i = 0; i < columns_count; ++i) {
+      reinterpret_cast<int *>(taskData->outputs[0])[i] = sum[i];
+    }
+  }
+  return true;
+}
+
+std::vector<int> sadikov_I_Sum_values_by_columns_matrix_mpi::MPITaskParallel::calculate(size_t size) {
+  std::vector<int> in(size);
+  for (size_t i = 0; i < size; ++i) {
+    in[i] = std::accumulate(local_input.begin() + i * rows_count, local_input.begin() + (i + 1) * rows_count, 0);
+  }
+  return in;
+}
diff --git a/tasks/seq/sadikov_I_sum_values_by_columns_matrix/func_tests/seq_func_tests.cpp b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/func_tests/seq_func_tests.cpp
new file mode 100644
index 00000000000..853ca58ba39
--- /dev/null
+++ b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/func_tests/seq_func_tests.cpp
@@ -0,0 +1,155 @@
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <random>
+#include <vector>
+
+#include "seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h"
+
+std::vector<int> getRandomVector(size_t size) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+
std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = (gen() % 100) - 49; + } + return vec; +} + +TEST(sum_values_by_columns_matrix, check_validation1) { + std::vector in(144, 1); + std::vector in_index{12, 12}; + std::vector out(12, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); +} + +TEST(sum_values_by_columns_matrix, check_validation2) { + std::vector in(144, 1); + std::vector in_index{12, 12}; + std::vector out(15, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), false); +} + +TEST(sum_values_by_columns_matrix, check_empty_matrix) { + std::vector in(0); + std::vector in_index{0, 0}; + std::vector out(0, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + EXPECT_NEAR(out[i], 0.0, 1e-6); + } +} + +TEST(sum_values_by_columns_matrix, check_square_matrix) { + std::vector in(144, 1); + std::vector in_index{12, 12}; + std::vector out(12, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], in_index[0]); + } +} + +TEST(sum_values_by_columns_matrix, check_square_matrix2) { + std::vector in(256, 1); + std::vector in_index{16, 16}; + std::vector out(16, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], in_index[0]); + } +} + +TEST(sum_values_by_columns_matrix, check_square_matrix3) { + std::vector in(256, 1); + std::vector in_index{16, 16}; + std::vector out(16, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], in_index[0]); + } +} + +TEST(sum_values_by_columns_matrix, check_rect_matrix1) { + std::vector in(500, 1); + std::vector in_index{50, 10}; + std::vector out(10, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], in_index[0]); + } +} + +TEST(sum_values_by_columns_matrix, check_rect_matrix2) { + std::vector in(10000, 1); + std::vector in_index{500, 20}; + std::vector out(20, 0); + std::shared_ptr taskData = + 
sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], in_index[0]); + } +} + +TEST(sum_values_by_columns_matrix, check_rect_matrix3) { + std::vector in_index{500, 20}; + std::vector out(20, 0); + std::vector in = getRandomVector(in_index[0] * in_index[1]); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + ASSERT_EQ(sv.validation(), true); + sv.pre_processing(); + sv.run(); + sv.post_processing(); + std::vector check_answer(in_index[1], 0); + for (int i = 0; i < in_index[1]; ++i) { + for (int j = 0; j < in_index[0]; ++j) { + check_answer[i] += in[j * in_index[1] + i]; + } + } + for (int i = 0; i < in_index[1]; ++i) { + ASSERT_EQ(out[i], check_answer[i]); + } +} \ No newline at end of file diff --git a/tasks/seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h new file mode 100644 index 00000000000..dec60968493 --- /dev/null +++ b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sadikov_I_Sum_values_by_columns_matrix_seq { +std::shared_ptr CreateTaskData(std::vector &InV, const std::vector &CeV, + std::vector &OtV); +class MatrixTask : public ppc::core::Task { + private: + std::vector sum; + std::vector matrix; + size_t rows_count, columns_count; + + public: + explicit MatrixTask(std::shared_ptr td); + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + void calculate(size_t size); +}; +} // namespace sadikov_I_Sum_values_by_columns_matrix_seq \ No newline at end of file diff --git a/tasks/seq/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..97caf49706d --- /dev/null +++ b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,65 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h" + +TEST(Sadikov_sequential_perf_test, matrix_test_pipeline_run) { + int rows_count = 3000; + int columns_count = 3000; + std::vector in(rows_count * columns_count, 1); + std::vector in_index{rows_count, columns_count}; + std::vector out(columns_count, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + + auto testTaskSequential = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, 
perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(in_index[1], out[0]); +} + +TEST(Sadikov_sequential_perf_test, matrix_test_run) { + int rows_count = 3000; + int columns_count = 3000; + std::vector in(rows_count * columns_count, 1); + std::vector in_index{rows_count, columns_count}; + std::vector out(columns_count, 0); + std::shared_ptr taskData = + sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData(in, in_index, out); + sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask sv(taskData); + + auto testTaskSequential = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(in_index[1], out[0]); +} \ No newline at end of file diff --git a/tasks/seq/sadikov_I_sum_values_by_columns_matrix/src/ops_seq.cpp b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..05bae21c287 --- /dev/null +++ b/tasks/seq/sadikov_I_sum_values_by_columns_matrix/src/ops_seq.cpp @@ -0,0 +1,61 @@ +#include +#include + +#include "seq/sadikov_I_sum_values_by_columns_matrix/include/sq_task.h" + +using namespace std::chrono_literals; + +sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::MatrixTask(std::shared_ptr td) + : Task(std::move(td)) {} + +bool sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::validation() { + internal_order_test(); + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::pre_processing() { + internal_order_test(); + rows_count = static_cast(taskData->inputs_count[0]); + columns_count = static_cast(taskData->inputs_count[1]); + matrix.reserve(rows_count * columns_count); + auto *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < columns_count; ++i) { + for (size_t j = 0; j < rows_count; ++j) { + matrix.emplace_back(tmp_ptr[j * columns_count + i]); + } + } + sum.reserve(rows_count); + return true; +} + +bool sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::run() { + internal_order_test(); + calculate(columns_count); + return true; +} + +bool sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::post_processing() { + internal_order_test(); + for (size_t i = 0; i < columns_count; ++i) { + reinterpret_cast(taskData->outputs[0])[i] = sum[i]; + } + return true; +} + +void sadikov_I_Sum_values_by_columns_matrix_seq::MatrixTask::calculate(size_t size) { + for (size_t i = 0; i < size; ++i) { + sum[i] = std::accumulate(matrix.begin() + static_cast(i * rows_count), + matrix.begin() + static_cast((i + 1) * rows_count), 0); + } +} + +std::shared_ptr sadikov_I_Sum_values_by_columns_matrix_seq::CreateTaskData( + std::vector &InV, const std::vector &CeV, std::vector &OtV) { + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(InV.data())); + taskData->inputs_count.emplace_back(CeV[0]); + taskData->inputs_count.emplace_back(CeV[1]); + taskData->outputs.emplace_back(reinterpret_cast(OtV.data())); + 
taskData->outputs_count.emplace_back(OtV.size());
+  return taskData;
+}

From c0042330014dc480d4d49016029717a1ca6dd137 Mon Sep 17 00:00:00 2001
From: tyurinm <112872782+tyurinm@users.noreply.github.com>
Date: Tue, 5 Nov 2024 03:39:32 +0300
Subject: [PATCH 110/155] =?UTF-8?q?=D0=A2=D1=8E=D1=80=D0=B8=D0=BD=20=D0=9C?=
 =?UTF-8?q?=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20=D0=97=D0=B0=D0=B4=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?=
 =?UTF-8?q?=2025.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D0=B5=D1=82=20=D1=87?=
 =?UTF-8?q?=D0=B8=D1=81=D0=BB=D0=B0=20=D0=BF=D1=80=D0=B5=D0=B4=D0=BB=D0=BE?=
 =?UTF-8?q?=D0=B6=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=B2=20=D1=81=D1=82=D1=80?=
 =?UTF-8?q?=D0=BE=D0=BA=D0=B5.=20(#130)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential version: the input is a string. The sequential algorithm walks
through the string and counts sentences, detecting their endings by the
period, exclamation mark, and question mark characters. The result is an
integer, the total number of sentences in the string.

Parallel version (MPI): the string is divided evenly among all processes
using an MPI broadcast. Each process counts the sentences in its own part of
the string, and the results are then combined with an MPI reduce. The final
result is the total number of sentences.
---
 .../func_tests/main.cpp | 261 ++++++++++++++++++
 .../include/ops_mpi.hpp |  51 ++++
 .../perf_tests/main.cpp | 104 +++++++
 .../src/ops_mpi.cpp     | 132 +++++++++
 .../func_tests/main.cpp | 153 ++++++++++
 .../include/ops_seq.hpp |  25 ++
 .../perf_tests/main.cpp |  87 ++++++
 .../src/ops_seq.cpp     |  50 ++++
 8 files changed, 863 insertions(+)
 create mode 100644 tasks/mpi/tyurin_m_count_sentences_in_string/func_tests/main.cpp
 create mode 100644 tasks/mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/tyurin_m_count_sentences_in_string/perf_tests/main.cpp
 create mode 100644 tasks/mpi/tyurin_m_count_sentences_in_string/src/ops_mpi.cpp
 create mode 100644 tasks/seq/tyurin_m_count_sentences_in_string/func_tests/main.cpp
 create mode 100644 tasks/seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp
 create mode 100644 tasks/seq/tyurin_m_count_sentences_in_string/perf_tests/main.cpp
 create mode 100644 tasks/seq/tyurin_m_count_sentences_in_string/src/ops_seq.cpp

diff --git a/tasks/mpi/tyurin_m_count_sentences_in_string/func_tests/main.cpp b/tasks/mpi/tyurin_m_count_sentences_in_string/func_tests/main.cpp
new file mode 100644
index 00000000000..717d88b2377
--- /dev/null
+++ b/tasks/mpi/tyurin_m_count_sentences_in_string/func_tests/main.cpp
@@ -0,0 +1,261 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp"
+
+TEST(tyurin_m_count_sentences_in_string_mpi, test_all_sentence_endings) {
+  boost::mpi::communicator world;
+  std::string input_str;
+  std::vector<int> global_count(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    input_str = "Hello world! How are you? 
I am fine."; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_no_sentence_endings) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + input_str = "This is a test without sentence endings"; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_mixed_content) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + input_str = "Sentence one. Another sentence! And another one? 
And one more."; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_empty_string) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + input_str = ""; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_multiple_consecutive_endings) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + input_str = "First sentence. Second sentence?! 
Third sentence."; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_various_whitespaces_between_sentences) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + input_str = "Sentence one. \nSecond sentence!\tThird sentence?"; + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(1); + + auto testMpiTaskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential->validation(), true); + testMpiTaskSequential->pre_processing(); + testMpiTaskSequential->run(); + testMpiTaskSequential->post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} diff --git a/tasks/mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp b/tasks/mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp new file mode 100644 index 00000000000..8af2aff8049 --- /dev/null +++ b/tasks/mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace tyurin_m_count_sentences_in_string_mpi { + +class SentenceCountTaskSequential : public ppc::core::Task { + public: + explicit SentenceCountTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_str_; + int sentence_count_ = 0; + + static bool is_sentence_end(char c); + static bool is_whitespace(char c); +}; + +class SentenceCountTaskParallel : 
public ppc::core::Task { + public: + explicit SentenceCountTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_str_; + std::string local_input_; + int sentence_count_ = 0; + int local_sentence_count_ = 0; + + boost::mpi::communicator world; + + static bool is_sentence_end(char c); + static bool is_whitespace(char c); +}; + +} // namespace tyurin_m_count_sentences_in_string_mpi diff --git a/tasks/mpi/tyurin_m_count_sentences_in_string/perf_tests/main.cpp b/tasks/mpi/tyurin_m_count_sentences_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..eff335d6792 --- /dev/null +++ b/tasks/mpi/tyurin_m_count_sentences_in_string/perf_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp" + +const size_t count_strings = 10000; + +TEST(tyurin_m_count_sentences_in_string_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + std::string str; + + if (world.rank() == 0) { + str = "This is the first sentence. And this is the second! Finally, the third?"; + input_str.resize(str.size() * count_strings); + for (size_t i = 0; i < count_strings; i++) { + std::copy(str.begin(), str.end(), input_str.begin() + i * str.size()); + } + global_count[0] = 0; + } + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(30000, global_count[0]); + } +} + +TEST(tyurin_m_count_sentences_in_string_mpi, test_task_run) { + boost::mpi::communicator world; + std::string input_str; + std::vector global_count(1, 0); + std::string str; + + if (world.rank() == 0) { + str = "This is the first sentence. And this is the second! 
Finally, the third?"; + input_str.resize(str.size() * count_strings); + for (size_t i = 0; i < count_strings; i++) { + std::copy(str.begin(), str.end(), input_str.begin() + i * str.size()); + } + global_count[0] = 0; + } + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input_str)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(30000, global_count[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/tyurin_m_count_sentences_in_string/src/ops_mpi.cpp b/tasks/mpi/tyurin_m_count_sentences_in_string/src/ops_mpi.cpp new file mode 100644 index 00000000000..9f5c0522a2b --- /dev/null +++ b/tasks/mpi/tyurin_m_count_sentences_in_string/src/ops_mpi.cpp @@ -0,0 +1,132 @@ +#include "mpi/tyurin_m_count_sentences_in_string/include/ops_mpi.hpp" + +#include +#include + +using namespace std::chrono_literals; + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::pre_processing() { + internal_order_test(); + input_str_ = *reinterpret_cast(taskData->inputs[0]); + sentence_count_ = 0; + return true; +} + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::run() { + internal_order_test(); + + bool inside_sentence = false; + for (char c : input_str_) { + if (is_sentence_end(c)) { + if (inside_sentence) { + sentence_count_++; + inside_sentence = false; + } + } else if (!is_whitespace(c)) { + inside_sentence = true; + } + } + return true; +} + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = sentence_count_; + return true; +} + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::is_sentence_end(char c) { + return c == '.' || c == '!' 
|| c == '?';
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskSequential::is_whitespace(char c) {
+  return c == ' ' || c == '\n' || c == '\t';
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    input_str_ = *reinterpret_cast<std::string *>(taskData->inputs[0]);
+  }
+
+  local_sentence_count_ = 0;
+  sentence_count_ = 0;
+
+  return true;
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::run() {
+  internal_order_test();
+
+  size_t total_length{};
+  if (world.rank() == 0) {
+    total_length = input_str_.size();
+  }
+  boost::mpi::broadcast(world, total_length, 0);
+
+  std::string local_segment;
+  size_t segment_size = total_length / world.size();
+  size_t remainder = 0;
+
+  if (world.rank() == 0) {
+    remainder = total_length % world.size();
+
+    for (int rank = 1; rank < world.size(); rank++) {
+      world.send(rank, 0, input_str_.data() + rank * segment_size + remainder, segment_size);
+    }
+
+    local_segment.assign(input_str_, 0, segment_size + remainder);
+  } else {
+    local_segment.resize(segment_size);
+    world.recv(0, 0, local_segment.data(), segment_size);
+  }
+
+  bool in_sentence = false;
+
+  // A terminator at position 0 closes a sentence that straddles the boundary
+  // with the previous segment, so it is credited to this segment. The position
+  // is compared, not the character value, so that a segment whose first
+  // character merely equals a later terminator is not overcounted.
+  for (size_t i = 0; i < local_segment.size(); i++) {
+    char character = local_segment[i];
+    if (is_sentence_end(character)) {
+      if (in_sentence || i == 0) {
+        local_sentence_count_++;
+        in_sentence = false;
+      }
+    } else if (!is_whitespace(character)) {
+      in_sentence = true;
+    }
+  }
+
+  boost::mpi::reduce(world, local_sentence_count_, sentence_count_, std::plus<>(), 0);
+
+  return true;
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    reinterpret_cast<int *>(taskData->outputs[0])[0] = sentence_count_;
+  }
+
+  return true;
+}
+
+bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::is_sentence_end(char c) {
+  return c == '.' || c == '!'
|| c == '?'; +} + +bool tyurin_m_count_sentences_in_string_mpi::SentenceCountTaskParallel::is_whitespace(char c) { + return c == ' ' || c == '\n' || c == '\t'; +} diff --git a/tasks/seq/tyurin_m_count_sentences_in_string/func_tests/main.cpp b/tasks/seq/tyurin_m_count_sentences_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..5e79136d7d6 --- /dev/null +++ b/tasks/seq/tyurin_m_count_sentences_in_string/func_tests/main.cpp @@ -0,0 +1,153 @@ +#include + +#include +#include + +#include "seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp" + +TEST(tyurin_m_count_sentences_in_string_seq, test_sentence_count_single_sentence) { + std::string input_str = "This is a single sentence."; + int expected_count = 1; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_sentence_count_multiple_sentences) { + std::string input_str = "This is the first sentence. Here is another one! And yet another?"; + int expected_count = 3; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_sentence_count_no_sentences) { + std::string input_str = "No sentence endings here"; + int expected_count = 0; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_sentence_count_empty_string) { + std::string input_str; + int expected_count = 0; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + 
taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_multiple_consecutive_sentence_endings) { + std::string input_str = "This is a sentence... And another one?!"; + int expected_count = 2; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_sentence_count_with_various_whitespaces) { + std::string input_str = "First sentence.\nSecond sentence!\tThird sentence?"; + int expected_count = 3; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_only_sentence_endings) { + std::string input_str = "...?!"; + int expected_count = 0; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential sentenceCountTask(taskDataSeq); + ASSERT_EQ(sentenceCountTask.validation(), true); + sentenceCountTask.pre_processing(); + sentenceCountTask.run(); + sentenceCountTask.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} diff --git a/tasks/seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp b/tasks/seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp new file mode 100644 index 00000000000..1dcdaeafa03 --- /dev/null +++ b/tasks/seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace tyurin_m_count_sentences_in_string_seq { + +class SentenceCountTaskSequential : public ppc::core::Task { + public: + explicit SentenceCountTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + 
bool post_processing() override; + + private: + std::string input_str_; + int sentence_count_ = 0; + + static bool is_sentence_end(char c); + static bool is_whitespace(char c); +}; + +} // namespace tyurin_m_count_sentences_in_string_seq diff --git a/tasks/seq/tyurin_m_count_sentences_in_string/perf_tests/main.cpp b/tasks/seq/tyurin_m_count_sentences_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..858f9fbc8d3 --- /dev/null +++ b/tasks/seq/tyurin_m_count_sentences_in_string/perf_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp" + +const size_t count_strings = 10000; + +TEST(tyurin_m_count_sentences_in_string_seq, test_pipeline_run) { + std::string str = "This is the first sentence. And this is the second! Finally, the third?"; + std::string input_str; + input_str.resize(str.size() * count_strings); + for (size_t i = 0; i < count_strings; i++) { + std::copy(str.begin(), str.end(), input_str.begin() + i * str.size()); + } + int expected_sentence_count = 30000; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto sentenceCountTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sentenceCountTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_sentence_count, out[0]); +} + +TEST(tyurin_m_count_sentences_in_string_seq, test_task_run) { + std::string str = "This is the first sentence. And this is the second! 
Finally, the third?"; + std::string input_str; + input_str.resize(str.size() * count_strings); + for (size_t i = 0; i < count_strings; i++) { + std::copy(str.begin(), str.end(), input_str.begin() + i * str.size()); + } + int expected_sentence_count = 30000; + + std::vector in_str(1, input_str); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto sentenceCountTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(sentenceCountTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_sentence_count, out[0]); +} diff --git a/tasks/seq/tyurin_m_count_sentences_in_string/src/ops_seq.cpp b/tasks/seq/tyurin_m_count_sentences_in_string/src/ops_seq.cpp new file mode 100644 index 00000000000..63ecf00b616 --- /dev/null +++ b/tasks/seq/tyurin_m_count_sentences_in_string/src/ops_seq.cpp @@ -0,0 +1,50 @@ +#include "seq/tyurin_m_count_sentences_in_string/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::pre_processing() { + internal_order_test(); + input_str_ = *reinterpret_cast(taskData->inputs[0]); + sentence_count_ = 0; + return true; +} + +bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::run() { + internal_order_test(); + + bool inside_sentence = false; + + for (char c : input_str_) { + if (is_sentence_end(c)) { + if (inside_sentence) { + sentence_count_++; + inside_sentence = false; + } + } else if (!is_whitespace(c)) { + inside_sentence = true; + } + } + + return true; +} + +bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = sentence_count_; + return true; +} + +bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::is_sentence_end(char c) { + return c == '.' || c == '!' 
|| c == '?';
+}
+
+bool tyurin_m_count_sentences_in_string_seq::SentenceCountTaskSequential::is_whitespace(char c) {
+  return c == ' ' || c == '\n' || c == '\t';
+}
\ No newline at end of file

From 2b4d2fa3b1a8172db2efca7ebfde3331f8e106b9 Mon Sep 17 00:00:00 2001
From: Sergey Vasilev <112872559+venn2713@users.noreply.github.com>
Date: Tue, 5 Nov 2024 03:40:41 +0300
Subject: [PATCH 111/155] =?UTF-8?q?=D0=92=D0=B0=D1=81=D0=B8=D0=BB=D1=8C?=
 =?UTF-8?q?=D0=B5=D0=B2=20=D0=A1=D0=B5=D1=80=D0=B3=D0=B5=D0=B9.=20=D0=97?=
 =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?=
 =?UTF-8?q?=D0=B0=D0=BD=D1=82=207.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB?=
 =?UTF-8?q?=D0=B5=D0=B5=20=D0=B1=D0=BB=D0=B8=D0=B7=D0=BA=D0=B8=D1=85=20?=
 =?UTF-8?q?=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB?=
 =?UTF-8?q?=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA?=
 =?UTF-8?q?=D1=82=D0=BE=D1=80=D0=B0.=20(#111)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Sequential implementation

- **Class**: `FindClosestNeighborsSequential`
- **Description**: the sequential version of the algorithm finds the minimum difference between adjacent elements of a vector.
- **Algorithm**:
  - In `pre_processing`, the input vector is initialized from `taskData`, `min_diff_` is set to the largest possible value, and the indices `index1_` and `index2_` are initialized to `-1`.
  - In `run`, every pair of adjacent elements is visited; the difference is computed, and whenever it is smaller than the current minimum, `min_diff_`, `index1_`, and `index2_` are updated.
  - `post_processing` writes the result (the minimum difference and the two indices) to the output buffer.
- **Result**: the algorithm returns the minimum difference between adjacent elements and the corresponding indices.

### MPI implementation

Class: `FindClosestNeighborsParallelMPI`

**Description**: the parallel version now uses `scatterv` to distribute the data across processes, which improves performance and simplifies the handling of subarray boundaries.

**Algorithm**:
- **Data distribution**: in `pre_processing`, the root process (rank 0) splits the input vector into subarrays whose boundaries overlap by one element, so that every process can correctly evaluate the differences between adjacent elements. Instead of `send`/`recv` pairs, the data is now delivered with `boost::mpi::scatterv`.
- **Local computation**: in `run`, each process finds the minimum difference within its own subarray, updating `min_diff_`, `index1_`, and `index2_` whenever a smaller value is found.
- **Collecting results**: after the local pass, the per-process results are combined on the root with `boost::mpi::reduce` using `boost::mpi::minimum`, which yields the global minimum among the local minima.
- **Writing the result**: in `post_processing`, the root process writes the final `min_diff_`, `index1_`, and `index2_` to the output buffer.

**MPI functions used**:
- `boost::mpi::broadcast` — broadcasts the input vector size (and the partition tables) to all processes.
- `boost::mpi::scatterv` — distributes the boundary-aware subarrays to the processes.
- `boost::mpi::reduce` — combines the local minima on the root process.
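To make the boundary overlap concrete, here is a minimal, self-contained sketch of this partitioning scheme. It is not part of the patch: it assumes Boost.MPI, and the helper name `make_overlapping_parts` is invented for illustration. Each block shares its first element with the previous block, so every adjacent pair of the input is examined by exactly one process:

```cpp
#include <boost/mpi.hpp>
#include <iostream>
#include <utility>
#include <vector>

// Illustrative helper (not from the patch): split n elements into `parts`
// blocks that overlap by one element, so each adjacent pair belongs to
// exactly one block. Returns {displacements, sizes}.
std::pair<std::vector<int>, std::vector<int>> make_overlapping_parts(int n, int parts) {
  std::vector<int> displs(parts, 0);
  std::vector<int> sizes(parts, 0);
  int pairs = (n > 0) ? n - 1 : 0;  // adjacent pairs to cover
  int offset = 0;
  for (int i = 0; i < parts; ++i) {
    int p = pairs / parts + (i < pairs % parts ? 1 : 0);  // pairs for block i
    if (p == 0) continue;                                 // surplus rank: empty block
    displs[i] = offset;
    sizes[i] = p + 1;  // p pairs span p + 1 elements
    offset += p;       // next block starts on the shared boundary element
  }
  return {displs, sizes};
}

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> data;
  if (world.rank() == 0) data = {5, 3, 8, 7, 2, 9, 4};

  // Every rank computes the same tables, so no extra broadcast is needed here.
  auto [displs, sizes] = make_overlapping_parts(7, world.size());

  std::vector<int> local(sizes[world.rank()]);
  if (world.rank() == 0) {
    boost::mpi::scatterv(world, data.data(), sizes, displs, local.data(), sizes[0], 0);
  } else {
    boost::mpi::scatterv(world, local.data(), sizes[world.rank()], 0);
  }

  std::cout << "rank " << world.rank() << " got " << local.size() << " elements\n";
  return 0;
}
```

Run with, e.g., `mpirun -np 3`; with more processes than pairs, the surplus ranks simply receive empty blocks.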
**Result**: the root process (rank 0) obtains the global minimum difference between adjacent elements together with their indices, which keeps the computation correct on large volumes of data.
---
 .../func_tests/main.cpp                       | 255 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  75 ++++++
 .../perf_tests/main.cpp                       | 133 +++++++++
 .../src/ops_mpi.cpp                           | 172 ++++++++++++
 .../func_tests/main.cpp                       | 133 +++++++++
 .../include/ops_seq.hpp                       |  26 ++
 .../perf_tests/main.cpp                       | 101 +++++++
 .../src/ops_seq.cpp                           |  46 ++++
 8 files changed, 941 insertions(+)
 create mode 100644 tasks/mpi/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/vasilev_s_nearest_neighbor_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/vasilev_s_nearest_neighbor_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..e0450fc8d9a
--- /dev/null
+++ b/tasks/mpi/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp
@@ -0,0 +1,255 @@
+#include
+
+#include
+#include
+#include
+#include
+
+#include "mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp"
+
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::uniform_int_distribution<> dist(0, 1000);
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = dist(gen);
+  }
+  return vec;
+}
+
+TEST(vasilev_s_nearest_neighbor_elements_mpi, test_small_vector) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_result(3, 0);  // min_diff, index1, index2
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_vec = {5, 3, 8, 7, 2};
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_result.data()));
+    taskDataPar->outputs_count.emplace_back(global_result.size());
+  }
+
+  vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI task_parallel(taskDataPar);
+  ASSERT_EQ(task_parallel.validation(), true);
+  task_parallel.pre_processing();
+  task_parallel.run();
+  task_parallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> expected_result(3, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expected_result.data()));
+    taskDataSeq->outputs_count.emplace_back(expected_result.size());
+
+    vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI task_sequential(taskDataSeq);
+    ASSERT_EQ(task_sequential.validation(), true);
+    task_sequential.pre_processing();
+    task_sequential.run();
+    task_sequential.post_processing();
+
+    ASSERT_EQ(global_result[0], expected_result[0]);  // min_diff
+    ASSERT_EQ(global_result[1], expected_result[1]);  // index1
+    ASSERT_EQ(global_result[2], expected_result[2]);  // index2
+  }
+}
+
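An aside on the reduction step these tests exercise: combining per-process `LocalResult` values with `boost::mpi::reduce` and `boost::mpi::minimum` relies only on the struct's `operator<` and its `serialize` member. A minimal standalone sketch, assuming Boost.MPI; the struct mirrors the patch's `LocalResult`, and the per-rank sample values are invented:

```cpp
#include <boost/mpi.hpp>
#include <iostream>
#include <limits>

// Mirrors the patch's LocalResult: ordered by min_diff, ties broken by index1.
struct LocalResult {
  int min_diff;
  int index1;
  int index2;

  bool operator<(const LocalResult& other) const {
    if (min_diff != other.min_diff) return min_diff < other.min_diff;
    return index1 < other.index1;
  }

  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) {
    ar & min_diff;
    ar & index1;
    ar & index2;
  }
};

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Pretend each rank found a candidate pair; rank r reports difference r + 1,
  // so rank 0 holds the global minimum.
  LocalResult local{world.rank() + 1, 2 * world.rank(), 2 * world.rank() + 1};

  LocalResult best{std::numeric_limits<int>::max(), -1, -1};
  boost::mpi::reduce(world, local, best, boost::mpi::minimum<LocalResult>(), 0);

  if (world.rank() == 0) {
    std::cout << "min diff " << best.min_diff << " at indices (" << best.index1 << ", " << best.index2 << ")\n";
  }
  return 0;
}
```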
+TEST(vasilev_s_nearest_neighbor_elements_mpi, test_random_vector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_result(3, 0); // min_diff, index1, index2 + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI task_parallel(taskDataPar); + ASSERT_EQ(task_parallel.validation(), true); + task_parallel.pre_processing(); + task_parallel.run(); + task_parallel.post_processing(); + + if (world.rank() == 0) { + std::vector expected_result(3, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expected_result.data())); + taskDataSeq->outputs_count.emplace_back(expected_result.size()); + + vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI task_sequential(taskDataSeq); + ASSERT_EQ(task_sequential.validation(), true); + task_sequential.pre_processing(); + task_sequential.run(); + task_sequential.post_processing(); + + ASSERT_EQ(global_result[0], expected_result[0]); // min_diff + ASSERT_EQ(global_result[1], expected_result[1]); // index1 + ASSERT_EQ(global_result[2], expected_result[2]); // index2 + } +} + +TEST(vasilev_s_nearest_neighbor_elements_mpi, test_equal_elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_result(3, 0); // min_diff, index1, index2 + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {7, 7, 7, 7, 7}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI task_parallel(taskDataPar); + ASSERT_EQ(task_parallel.validation(), true); + task_parallel.pre_processing(); + task_parallel.run(); + task_parallel.post_processing(); + + if (world.rank() == 0) { + std::vector expected_result(3, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expected_result.data())); + taskDataSeq->outputs_count.emplace_back(expected_result.size()); + + vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI task_sequential(taskDataSeq); + task_sequential.validation(); + task_sequential.pre_processing(); + task_sequential.run(); + task_sequential.post_processing(); + + ASSERT_EQ(global_result[0], expected_result[0]); // min_diff + ASSERT_EQ(global_result[1], expected_result[1]); // index1 + ASSERT_EQ(global_result[2], expected_result[2]); // index2 + } +} + +TEST(vasilev_s_nearest_neighbor_elements_mpi, test_negative_numbers) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector 
global_result(3, 0);  // min_diff, index1, index2
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_vec = {-10, -20, -15, -30, -25};
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_result.data()));
+    taskDataPar->outputs_count.emplace_back(global_result.size());
+  }
+
+  vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI task_parallel(taskDataPar);
+  task_parallel.validation();
+  task_parallel.pre_processing();
+  task_parallel.run();
+  task_parallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> expected_result(3, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expected_result.data()));
+    taskDataSeq->outputs_count.emplace_back(expected_result.size());
+
+    vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI task_sequential(taskDataSeq);
+    ASSERT_EQ(task_sequential.validation(), true);
+    task_sequential.pre_processing();
+    task_sequential.run();
+    task_sequential.post_processing();
+
+    ASSERT_EQ(global_result[0], expected_result[0]);  // min_diff
+    ASSERT_EQ(global_result[1], expected_result[1]);  // index1
+    ASSERT_EQ(global_result[2], expected_result[2]);  // index2
+  }
+}
+
+TEST(LocalResultTest, OperatorLessThan) {
+  vasilev_s_nearest_neighbor_elements_mpi::LocalResult a{5, 10, 11};
+  vasilev_s_nearest_neighbor_elements_mpi::LocalResult b{10, 5, 6};
+  vasilev_s_nearest_neighbor_elements_mpi::LocalResult c{5, 9, 10};
+
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(b < a);
+  EXPECT_FALSE(a < c);
+  EXPECT_TRUE(c < a);
+}
+
+TEST(PartitionArrayTest, TestEqualPartitions) {
+  int amount = 10;
+  int num_partitions = 5;
+  auto result = vasilev_s_nearest_neighbor_elements_mpi::partitionArray(amount, num_partitions);
+
+  std::vector<int> expected_sizes = {3, 3, 3, 3, 2};  // even distribution
+  std::vector<int> expected_displs = {0, 2, 4, 6, 8};  // offsets
+
+  ASSERT_EQ(result.first, expected_displs);
+  ASSERT_EQ(result.second, expected_sizes);
+}
+
+TEST(PartitionArrayTest, TestUnequalPartitions) {
+  int amount = 10;
+  int num_partitions = 3;
+  auto result = vasilev_s_nearest_neighbor_elements_mpi::partitionArray(amount, num_partitions);
+
+  std::vector<int> expected_sizes = {4, 4, 4};  // uneven distribution
+  std::vector<int> expected_displs = {0, 3, 6};  // offsets
+
+  ASSERT_EQ(result.first, expected_displs);
+  ASSERT_EQ(result.second, expected_sizes);
+}
+
+TEST(PartitionArrayTest, TestMorePartitionsThanElements) {
+  int amount = 3;
+  int num_partitions = 5;
+  auto result = vasilev_s_nearest_neighbor_elements_mpi::partitionArray(amount, num_partitions);
+
+  std::vector<int> expected_sizes = {2, 2, 0, 0, 0};  // partitions beyond the element count stay empty
+  std::vector<int> expected_displs = {0, 1, -1, -1, -1};  // offsets, with -1 marking the "empty" partitions
+
+  ASSERT_EQ(result.first, expected_displs);
+  ASSERT_EQ(result.second, expected_sizes);
+}
+
+TEST(PartitionArrayTest, TestSinglePartition) {
+  int amount = 10;
+  int num_partitions = 1;
+  auto result = vasilev_s_nearest_neighbor_elements_mpi::partitionArray(amount, num_partitions);
+
+  std::vector<int> expected_sizes = {10};  // the single partition holds all the elements
+  std::vector<int> expected_displs = {0};  // a single offset
+
+  ASSERT_EQ(result.first, expected_displs);
+  ASSERT_EQ(result.second, expected_sizes);
+}
+
+TEST(PartitionArrayTest, TestZeroElements) {
+  int amount = 0;
+  int num_partitions = 5;
+  auto result = vasilev_s_nearest_neighbor_elements_mpi::partitionArray(amount, num_partitions);
+
+  std::vector<int> expected_sizes = {0, 0, 0, 0, 0};  // all partitions are empty
+  std::vector<int> expected_displs = {-1, -1, -1, -1, -1};  // no valid offsets
+
+  ASSERT_EQ(result.first, expected_displs);
+  ASSERT_EQ(result.second, expected_sizes);
+}
\ No newline at end of file
diff --git a/tasks/mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp
new file mode 100644
index 00000000000..025d95ab919
--- /dev/null
+++ b/tasks/mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp
@@ -0,0 +1,75 @@
+#pragma once
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "core/task/include/task.hpp"
+
+namespace vasilev_s_nearest_neighbor_elements_mpi {
+
+struct LocalResult {
+  int min_diff;
+  int index1;
+  int index2;
+
+  bool operator<(const LocalResult& other) const {
+    if (min_diff != other.min_diff) {
+      return min_diff < other.min_diff;
+    }
+    return index1 < other.index1;
+  }
+
+  template <class Archive>
+  void serialize(Archive& ar, const unsigned int version) {
+    ar & min_diff;
+    ar & index1;
+    ar & index2;
+  }
+};
+
+std::vector<int> getRandomVector(int sz);
+std::pair<std::vector<int>, std::vector<int>> partitionArray(int amount, int num_partitions);
+
+class FindClosestNeighborsSequentialMPI : public ppc::core::Task {
+ public:
+  explicit FindClosestNeighborsSequentialMPI(std::shared_ptr<ppc::core::TaskData> taskData_)
+      : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input_;
+  int min_diff_{};
+  int index1_{};
+  int index2_{};
+};
+
+class FindClosestNeighborsParallelMPI : public ppc::core::Task {
+ public:
+  explicit FindClosestNeighborsParallelMPI(std::shared_ptr<ppc::core::TaskData> taskData_)
+      : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input_;
+  int rank_offset_;
+  int min_diff_ = std::numeric_limits<int>::max();
+  int index1_ = -1;
+  int index2_ = -1;
+  std::vector<int> distribution;
+  std::vector<int> displacement;
+  boost::mpi::communicator world;
+};
+
+}  // namespace vasilev_s_nearest_neighbor_elements_mpi
diff --git a/tasks/mpi/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..74d5674ddc3
--- /dev/null
+++ b/tasks/mpi/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp
@@ -0,0 +1,133 @@
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp"
+
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::uniform_int_distribution<> dist(0, 1000);
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = dist(gen);
+  }
+  return vec;
+}
+
+TEST(vasilev_s_nearest_neighbor_elements_mpi, test_pipeline_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_result(3, 0);  // min_diff, index1, index2
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  int count_size_vector;
+  if (world.rank() == 0) {
+    count_size_vector = 1000000;
+    global_vec =
getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto taskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(taskParallel->validation(), true); + taskParallel->pre_processing(); + taskParallel->run(); + taskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + + std::vector reference_result(3, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + auto taskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(taskSequential->validation(), true); + taskSequential->pre_processing(); + taskSequential->run(); + taskSequential->post_processing(); + + ASSERT_EQ(global_result[0], reference_result[0]); // min_diff + ASSERT_EQ(global_result[1], reference_result[1]); // index1 + ASSERT_EQ(global_result[2], reference_result[2]); // index2 + } +} + +TEST(vasilev_s_nearest_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_result(3, 0); // min_diff, index1, index2 + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 1000000; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto taskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(taskParallel->validation(), true); + taskParallel->pre_processing(); + taskParallel->run(); + taskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + + std::vector reference_result(3, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + auto taskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(taskSequential->validation(), true); + taskSequential->pre_processing(); + taskSequential->run(); + taskSequential->post_processing(); + + 
ASSERT_EQ(global_result[0], reference_result[0]); // min_diff + ASSERT_EQ(global_result[1], reference_result[1]); // index1 + ASSERT_EQ(global_result[2], reference_result[2]); // index2 + } +} \ No newline at end of file diff --git a/tasks/mpi/vasilev_s_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/vasilev_s_nearest_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..a1dc0c5219b --- /dev/null +++ b/tasks/mpi/vasilev_s_nearest_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,172 @@ +#include "mpi/vasilev_s_nearest_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI::pre_processing() { + internal_order_test(); + input_.resize(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + + min_diff_ = std::numeric_limits::max(); + index1_ = -1; + index2_ = -1; + return true; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI::validation() { + internal_order_test(); + return !taskData->inputs_count.empty() && taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 3; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; ++i) { + int diff = std::abs(input_[i + 1] - input_[i]); + if (diff < min_diff_) { + min_diff_ = diff; + index1_ = static_cast(i); + index2_ = static_cast(i + 1); + } + } + return true; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsSequentialMPI::post_processing() { + internal_order_test(); + int* output_ptr = reinterpret_cast(taskData->outputs[0]); + output_ptr[0] = min_diff_; + output_ptr[1] = index1_; + output_ptr[2] = index2_; + return true; +} + +std::pair, std::vector> vasilev_s_nearest_neighbor_elements_mpi::partitionArray( + int amount, int num_partitions) { + std::vector displs(num_partitions); + std::vector sizes(num_partitions); + int total_elements = amount + num_partitions - 1; + int base_size = total_elements / num_partitions; + int extra_elements = total_elements % num_partitions; + + if (amount <= num_partitions) { + for (int i = 0; i < num_partitions; i++) { + if (i < amount - 1) { + sizes[i] = 2; + displs[i] = i; + } else { + sizes[i] = 0; + displs[i] = -1; + } + } + } else { + for (int i = 0; i < num_partitions; i++) { + if (extra_elements > 0) { + sizes[i] = base_size + 1; + extra_elements--; + } else { + sizes[i] = base_size; + } + + if (i == 0) { + displs[i] = 0; + } else { + displs[i] = displs[i - 1] + sizes[i - 1] - 1; + } + } + } + return {displs, sizes}; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI::validation() { + internal_order_test(); + return world.rank() != 0 || taskData->inputs_count[0] > 1; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI::pre_processing() { + internal_order_test(); + + rank_offset_ = 0; + + if (world.rank() == 0) { + min_diff_ = std::numeric_limits::max(); + index1_ = -1; + index2_ = -1; + std::tie(displacement, distribution) = + vasilev_s_nearest_neighbor_elements_mpi::partitionArray(taskData->inputs_count[0], world.size()); + } + + return true; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI::run() { + internal_order_test(); + + unsigned int amount = 0; + + if 
(world.rank() == 0) { + amount = taskData->inputs_count[0]; + } + + boost::mpi::broadcast(world, amount, 0); + + boost::mpi::broadcast(world, displacement, 0); + boost::mpi::broadcast(world, distribution, 0); + + rank_offset_ = displacement[world.rank()]; + + input_.resize(distribution[world.rank()]); + if (world.rank() == 0) { + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + boost::mpi::scatterv(world, in_p, distribution, displacement, input_.data(), distribution[0], 0); + } else { + boost::mpi::scatterv(world, input_.data(), distribution[world.rank()], 0); + } + + LocalResult local_result{std::numeric_limits::max(), -1, -1}; + const std::size_t size = input_.size(); + + if (size > 0) { + for (size_t i = 0; i < input_.size() - 1; ++i) { + int diff = std::abs(input_[i + 1] - input_[i]); + + int current_index1 = static_cast(rank_offset_ + i); + int current_index2 = static_cast(rank_offset_ + i + 1); + + if (diff < local_result.min_diff || (diff == local_result.min_diff && current_index1 < local_result.index1)) { + local_result.min_diff = diff; + local_result.index1 = current_index1; + local_result.index2 = current_index2; + } + } + } + + LocalResult global_result{std::numeric_limits::max(), -1, -1}; + boost::mpi::reduce(world, local_result, global_result, boost::mpi::minimum(), 0); + + if (world.rank() == 0) { + min_diff_ = global_result.min_diff; + index1_ = global_result.index1; + index2_ = global_result.index2; + } + return true; +} + +bool vasilev_s_nearest_neighbor_elements_mpi::FindClosestNeighborsParallelMPI::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = min_diff_; + reinterpret_cast(taskData->outputs[0])[1] = index1_; + reinterpret_cast(taskData->outputs[0])[2] = index2_; + } + + return true; +} diff --git a/tasks/seq/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..de9a0a447d3 --- /dev/null +++ b/tasks/seq/vasilev_s_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,133 @@ +#include + +#include + +#include "seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Small_Vector) { + std::vector input_vec = {5, 3, 8, 7, 2}; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), true); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + int expected_min_diff = 1; + int expected_index1 = 2; + int expected_index2 = 3; + + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Equal_Elements) { + std::vector input_vec = {7, 7, 7, 7, 7}; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), true); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + int expected_min_diff = 0; + int expected_index1 = 0; + int expected_index2 = 1; + + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Negative_Numbers) { + std::vector input_vec = {-10, -20, -15, -30, -25}; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), true); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + + int expected_min_diff = 5; + int expected_index1 = 1; + int expected_index2 = 2; + + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Single_Element_Vector) { + std::vector input_vec = {42}; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), false); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Empty_Vector) { + std::vector input_vec; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(nullptr); + taskDataSeq->inputs_count.emplace_back(0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), false); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, Test_Large_Vector) { + std::vector input_vec = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55}; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential taskSequential(taskDataSeq); + ASSERT_EQ(taskSequential.validation(), true); + taskSequential.pre_processing(); + taskSequential.run(); + taskSequential.post_processing(); + 
+ int expected_min_diff = 5; + int expected_index1 = 0; + int expected_index2 = 1; + + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} diff --git a/tasks/seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..e6162005810 --- /dev/null +++ b/tasks/seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace vasilev_s_nearest_neighbor_elements_seq { + +class FindClosestNeighborsSequential : public ppc::core::Task { + public: + explicit FindClosestNeighborsSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int min_diff_{}; + int index1_{}; + int index2_{}; +}; + +} // namespace vasilev_s_nearest_neighbor_elements_seq diff --git a/tasks/seq/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..c7828fbfd00 --- /dev/null +++ b/tasks/seq/vasilev_s_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,101 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(vasilev_s_nearest_neighbor_elements_seq, test_pipeline_run) { + std::vector input_vec; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + int count_size_vector = 100000; + + input_vec.resize(count_size_vector); + for (int i = 0; i < count_size_vector; ++i) { + input_vec[i] = i; + } + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + auto taskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(taskSequential->validation(), true); + taskSequential->pre_processing(); + taskSequential->run(); + taskSequential->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + auto start_time = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&start_time] { + auto now = std::chrono::high_resolution_clock::now(); + std::chrono::duration elapsed = now - start_time; + return elapsed.count(); + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + int expected_min_diff = 1; + int expected_index1 = 0; + int expected_index2 = 1; + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} + +TEST(vasilev_s_nearest_neighbor_elements_seq, test_task_run) { + std::vector input_vec; + std::vector output_result(3, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + int count_size_vector = 100000; + + input_vec.resize(count_size_vector); + for (int i = 0; i < count_size_vector; ++i) { + input_vec[i] = i; + } + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_vec.data())); + 
taskDataSeq->inputs_count.emplace_back(input_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_result.data())); + taskDataSeq->outputs_count.emplace_back(output_result.size()); + + auto taskSequential = + std::make_shared(taskDataSeq); + ASSERT_EQ(taskSequential->validation(), true); + taskSequential->pre_processing(); + taskSequential->run(); + taskSequential->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + auto start_time = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&start_time] { + auto now = std::chrono::high_resolution_clock::now(); + std::chrono::duration elapsed = now - start_time; + return elapsed.count(); + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + int expected_min_diff = 1; + int expected_index1 = 0; + int expected_index2 = 1; + ASSERT_EQ(output_result[0], expected_min_diff); + ASSERT_EQ(output_result[1], expected_index1); + ASSERT_EQ(output_result[2], expected_index2); +} diff --git a/tasks/seq/vasilev_s_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/vasilev_s_nearest_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..16ae615639f --- /dev/null +++ b/tasks/seq/vasilev_s_nearest_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,46 @@ +#include "seq/vasilev_s_nearest_neighbor_elements/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential::pre_processing() { + internal_order_test(); + input_.resize(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + + min_diff_ = std::numeric_limits::max(); + index1_ = -1; + index2_ = -1; + return true; +} + +bool vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential::validation() { + internal_order_test(); + return !taskData->inputs_count.empty() && taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 3; +} + +bool vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; ++i) { + int diff = std::abs(input_[i + 1] - input_[i]); + if (diff < min_diff_) { + min_diff_ = diff; + index1_ = static_cast(i); + index2_ = static_cast(i + 1); + } + } + return true; +} + +bool vasilev_s_nearest_neighbor_elements_seq::FindClosestNeighborsSequential::post_processing() { + internal_order_test(); + int* output_ptr = reinterpret_cast(taskData->outputs[0]); + output_ptr[0] = min_diff_; + output_ptr[1] = index1_; + output_ptr[2] = index2_; + return true; +} From 24c84884dc35753fe2d4ca4abb46c6d77a179209 Mon Sep 17 00:00:00 2001 From: Mikhail Andreevich <113550385+StroganovM@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:40:32 +0300 Subject: [PATCH 112/155] =?UTF-8?q?=D0=A1=D1=82=D1=80=D0=BE=D0=B3=D0=B0?= =?UTF-8?q?=D0=BD=D0=BE=D0=B2=20=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2022.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0?= =?UTF-8?q?=20=D0=B1=D1=83=D0=BA=D0=B2=D0=B5=D0=BD=D0=BD=D1=8B=D1=85=20?= 
=?UTF-8?q?=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE=D0=B2=20=D0=B2=20?=
 =?UTF-8?q?=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.=20(#153)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This project implements counting the characters in a string with two methods: a sequential one and a parallel one based on MPI. The goal of the project is to compare the performance and the correctness of both approaches by testing them on a variety of inputs.

Key components:

Tests: both methods are exercised by gtest-based tests that assess the correctness and the performance of the implementation.

Character-counting functions: the implementation includes functions for generating random strings and for the actual character counting, as well as classes for running the task in sequential and parallel environments.

Goal: the project demonstrates the advantages and drawbacks of parallel processing relative to the sequential approach, making it possible to judge how much MPI can speed up the task on large volumes of data.
---
 .../func_tests/main.cpp                       | 484 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  47 ++
 .../perf_tests/main.cpp                       |  92 ++++
 .../src/ops_mpi.cpp                           | 121 +++++
 .../func_tests/main.cpp                       | 135 +++++
 .../include/ops_seq.hpp                       |  26 +
 .../perf_tests/main.cpp                       |  85 +++
 .../src/ops_seq.cpp                           |  42 ++
 8 files changed, 1032 insertions(+)
 create mode 100644 tasks/mpi/stroganov_m_count_symbols_in_string/func_tests/main.cpp
 create mode 100644 tasks/mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/stroganov_m_count_symbols_in_string/perf_tests/main.cpp
 create mode 100644 tasks/mpi/stroganov_m_count_symbols_in_string/src/ops_mpi.cpp
 create mode 100644 tasks/seq/stroganov_m_count_symbols_in_string/func_tests/main.cpp
 create mode 100644 tasks/seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp
 create mode 100644 tasks/seq/stroganov_m_count_symbols_in_string/perf_tests/main.cpp
 create mode 100644 tasks/seq/stroganov_m_count_symbols_in_string/src/ops_seq.cpp

diff --git a/tasks/mpi/stroganov_m_count_symbols_in_string/func_tests/main.cpp b/tasks/mpi/stroganov_m_count_symbols_in_string/func_tests/main.cpp
new file mode 100644
index 00000000000..e179bbdfddb
--- /dev/null
+++ b/tasks/mpi/stroganov_m_count_symbols_in_string/func_tests/main.cpp
@@ -0,0 +1,484 @@
+// Copyright 2024 Stroganov Mikhail
+#include
+
+#include
+#include
+#include
+
+#include "mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp"
+#include "mpi/stroganov_m_count_symbols_in_string/src/ops_mpi.cpp"
+
+TEST(stroganov_m_count_symbols_in_string_mpi, EmptyString) {
+  boost::mpi::communicator world;
+  std::string global_str;
+
+  // Create data
+  std::vector<int> global_out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataMpi = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataMpi->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataMpi->inputs_count.emplace_back(global_str.size());
+    taskDataMpi->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_out.data()));
+    taskDataMpi->outputs_count.emplace_back(global_out.size());
+  }
+  // Create Task
+  stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    ASSERT_EQ(0, global_out[0]);
+  }
+}
+
+TEST(stroganov_m_count_symbols_in_string_mpi, StringWithoutLetter) {
+  boost::mpi::communicator world;
+  std::string
global_str = "1234"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, StringWithOnlyLetter) { + boost::mpi::communicator world; + std::string global_str = "qwer"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, RandomString) { + boost::mpi::communicator world; + std::string global_str = getRandomStringForCountOfSymbols(); + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + 
taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, MixedSymbolsString) { + boost::mpi::communicator world; + std::string global_str = "qwer1234"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 4); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, LongString) { + boost::mpi::communicator world; + std::string global_str(10000, 'a'); + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel 
testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 10000); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, OneLetterSingleString) { + boost::mpi::communicator world; + std::string global_str = "q"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, TwoLetterSingleString) { + boost::mpi::communicator world; + std::string global_str = "qq"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData 
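+    // Cross-check pattern used throughout these tests: rank 0 re-runs the
+    // same input through the sequential task and compares it with the
+    // parallel result below; reference_out's initial 0 is only a placeholder
+    // that post_processing() overwrites.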
+ std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, ThreeLetterSingleString) { + boost::mpi::communicator world; + std::string global_str = "qqq"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, FourLetterSingleString) { + boost::mpi::communicator world; + std::string global_str = "qqqq"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + 
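+    // outputs_count records the length of the buffer registered above;
+    // validation() insists on outputs_count[0] == 1, a single int slot.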
taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, SingleStringWithoutLetter) { + boost::mpi::communicator world; + std::string global_str = "1"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} diff --git a/tasks/mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp b/tasks/mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp new file mode 100644 index 00000000000..ae34a583518 --- /dev/null +++ b/tasks/mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2024 Stroganov Mikhail +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace stroganov_m_count_symbols_in_string_mpi { + +int countOfSymbols(std::string& str); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}, local_input_{}; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace stroganov_m_count_symbols_in_string_mpi diff --git a/tasks/mpi/stroganov_m_count_symbols_in_string/perf_tests/main.cpp b/tasks/mpi/stroganov_m_count_symbols_in_string/perf_tests/main.cpp new file mode 
100644 index 00000000000..a23c8444fea --- /dev/null +++ b/tasks/mpi/stroganov_m_count_symbols_in_string/perf_tests/main.cpp @@ -0,0 +1,92 @@ +// Copyright 2024 Stroganov Mikhail +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp" + +TEST(stroganov_m_count_symbols_in_string_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string string = "string"; + std::string global_str; + for (int i = 0; i < 20000; i++) { + global_str += string; + } + + std::vector global_out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} + +TEST(stroganov_m_count_symbols_in_string_mpi, test_task_run) { + boost::mpi::communicator world; + std::string string = "string"; + std::string global_str; + for (int i = 0; i < 20000; i++) { + global_str += string; + } + + std::vector global_out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} diff --git a/tasks/mpi/stroganov_m_count_symbols_in_string/src/ops_mpi.cpp b/tasks/mpi/stroganov_m_count_symbols_in_string/src/ops_mpi.cpp new file mode 100644 index 00000000000..ef23aa2e23d --- /dev/null +++ b/tasks/mpi/stroganov_m_count_symbols_in_string/src/ops_mpi.cpp @@ -0,0 +1,121 @@ +// Copyright 2024 Stroganov Mikhail +#include "mpi/stroganov_m_count_symbols_in_string/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +int 
getRandomNumForCountOfSymbols(int min, int max) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  return ((gen() % (max - min + 1)) + min);
+}
+
+std::string getRandomStringForCountOfSymbols() {
+  std::string result;
+  std::string dictionary = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz1234567890";
+  int str_len = getRandomNumForCountOfSymbols(1000, 20000);
+  for (int i = 0; i < str_len; i++) {
+    result += dictionary[getRandomNumForCountOfSymbols(0, static_cast<int>(dictionary.size()) - 1)];
+  }
+  return result;
+}
+
+int stroganov_m_count_symbols_in_string_mpi::countOfSymbols(std::string& str) {
+  int result = 0;
+  size_t n = str.size();
+  for (size_t i = 0; i < n; i++) {
+    // Cast through unsigned char: passing a negative char to isalpha() is UB.
+    if (isalpha(static_cast<unsigned char>(str[i])) != 0) {
+      result++;
+    }
+  }
+  return result;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::string(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+  result = 0;
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // inputs_count[0] is unsigned, so the old ">= 0" clause was a tautology,
+  // and comparing typeid(...).name() pointers never verified the buffer
+  // type; the single-slot output buffer is the only meaningful precondition.
+  return taskData->outputs_count[0] == 1;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  result = countOfSymbols(input_);
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = result;
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  result = 0;
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Same simplification as in the sequential task: drop the tautological
+    // ">= 0" check and the vacuous typeid comparison.
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  unsigned int partition_size = 0;
+  if (world.rank() == 0) {
+    partition_size = (taskData->inputs_count[0] + world.size() - 1) / world.size();
+    input_ = std::string(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+    for (int proc = 1; proc < world.size(); ++proc) {
+      unsigned int start_idx = proc * partition_size;
+      if (start_idx >= input_.size()) {
+        // Send 0u, not the int literal 0: the receiver posts an unsigned int,
+        // and the MPI datatypes of send and recv must match.
+        world.send(proc, 0, 0u);
+        continue;
+      }
+      unsigned int size_to_send =
+          (start_idx + partition_size > input_.size()) ?
input_.size() - start_idx : partition_size; + world.send(proc, 0, size_to_send); + world.send(proc, 0, input_.data() + start_idx, size_to_send); + } + local_input_ = input_.substr(0, partition_size); + } else { + unsigned int received_size = 0; + world.recv(0, 0, received_size); + if (received_size > 0) { + std::vector buffer(received_size); + world.recv(0, 0, buffer.data(), received_size); + local_input_ = std::string(buffer.data(), buffer.size()); + } else { + local_input_.clear(); + } + } + int local_result = 0; + local_result = countOfSymbols(local_input_); + reduce(world, local_result, result, std::plus(), 0); + return true; +} + +bool stroganov_m_count_symbols_in_string_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} diff --git a/tasks/seq/stroganov_m_count_symbols_in_string/func_tests/main.cpp b/tasks/seq/stroganov_m_count_symbols_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..8ecebcf818e --- /dev/null +++ b/tasks/seq/stroganov_m_count_symbols_in_string/func_tests/main.cpp @@ -0,0 +1,135 @@ +// Copyright 2024 Stroganov Mikhail +#include + +#include + +#include "seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp" + +TEST(stroganov_m_count_symbols_in_string_seq, EmptyString) { + std::string str; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(stroganov_m_count_symbols_in_string_seq, StringWithoutLetter) { + std::string str = "123123123123/"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(stroganov_m_count_symbols_in_string_seq, StringWithOneLetter) { + std::string str = "123q321"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + 
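+  // "123q321" contains exactly one alphabetic character ('q'); countSymbols
+  // ignores the digits, so the expected count checked below is 1.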
ASSERT_EQ(1, out[0]); +} + +TEST(stroganov_m_count_symbols_in_string_seq, TestString_Strogan21) { + std::string str = "Strogan21"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(7, out[0]); +} + +TEST(stroganov_m_count_symbols_in_string_seq, StringWithOnlySpecialCharacters) { + std::string str = "!@#$%"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(stroganov_m_count_symbols_in_string_seq, StringWithSpaces) { + std::string str = "Hello World"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + stroganov_m_count_symbols_in_string_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(10, out[0]); +} diff --git a/tasks/seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp b/tasks/seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp new file mode 100644 index 00000000000..2714a14c6e3 --- /dev/null +++ b/tasks/seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Stroganov Mikhail +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace stroganov_m_count_symbols_in_string_seq { + +int countSymbols(std::string& str); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +} // namespace stroganov_m_count_symbols_in_string_seq diff --git a/tasks/seq/stroganov_m_count_symbols_in_string/perf_tests/main.cpp b/tasks/seq/stroganov_m_count_symbols_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..1461b41491e --- /dev/null +++ b/tasks/seq/stroganov_m_count_symbols_in_string/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2024 
Stroganov Mikhail +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp" + +TEST(stroganov_m_count_symbols_in_string_seq_perf_test, test_pipeline_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(stroganov_m_count_symbols_in_string_seq, test_task_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} diff --git a/tasks/seq/stroganov_m_count_symbols_in_string/src/ops_seq.cpp b/tasks/seq/stroganov_m_count_symbols_in_string/src/ops_seq.cpp new file mode 100644 index 00000000000..df58ef28311 --- /dev/null +++ b/tasks/seq/stroganov_m_count_symbols_in_string/src/ops_seq.cpp @@ -0,0 +1,42 @@ +// Copyright 2024 Stroganov Mikhail +#include "seq/stroganov_m_count_symbols_in_string/include/ops_seq.hpp" + +#include +#include +#include + +int stroganov_m_count_symbols_in_string_seq::countSymbols(std::string& str) { + int result = 0; + size_t n = str.size(); + for (size_t i = 0; i < n; i++) { + if (isalpha(str[i]) != 0) { + result++; + } + } + return result; +} + +bool stroganov_m_count_symbols_in_string_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = 
std::string(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+  result = 0;
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // inputs_count[0] is unsigned, so the old ">= 0" clause was always true;
+  // the only meaningful precondition is a single-slot output buffer.
+  return taskData->outputs_count[0] == 1;
+}
+
+bool stroganov_m_count_symbols_in_string_seq::TestTaskSequential::run() {
+  internal_order_test();
+  result = countSymbols(input_);
+  return true;
+}
+
+bool stroganov_m_count_symbols_in_string_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = result;
+  return true;
+}

From ec8c457af139b977866acf2c03cc210b569f8d63 Mon Sep 17 00:00:00 2001
From: novi40k1 <144904163+novi40k1@users.noreply.github.com>
Date: Wed, 6 Nov 2024 03:17:00 +0300
Subject: [PATCH 113/155] Beresnev Anton. Task 1. Variant 18. Finding minimum
 values by matrix columns. (#116)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

seq: the minima are found by a straightforward scan.
mpi: every process is handed an equal number of columns, except the root
(which also takes the remainder). Each process finds the minima for its
columns and sends them into the global answer.
---
 .../func_tests/main.cpp | 466 ++++++++++++++++++
 .../include/ops_mpi.hpp |  48 ++
 .../perf_tests/main.cpp | 110 +++++
 .../src/ops_mpi.cpp     | 132 +++++
 .../func_tests/main.cpp | 361 ++++++++++++++
 .../include/ops_seq.hpp |  23 +
 .../perf_tests/main.cpp | 124 +++++
 .../src/ops_seq.cpp     |  42 ++
 8 files changed, 1306 insertions(+)
 create mode 100644 tasks/mpi/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp
 create mode 100644 tasks/mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp
 create mode 100644 tasks/mpi/beresnev_a_min_values_by_matrix_columns/src/ops_mpi.cpp
 create mode 100644 tasks/seq/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp
 create mode 100644 tasks/seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp
 create mode 100644 tasks/seq/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp
 create mode 100644 tasks/seq/beresnev_a_min_values_by_matrix_columns/src/ops_seq.cpp

diff --git a/tasks/mpi/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp
new file mode 100644
index 00000000000..8aa9674d39a
--- /dev/null
+++ b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp
@@ -0,0 +1,466 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <cstdint>
+#include <random>
+#include <vector>
+
+#include "mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp"
+
+static std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+static std::vector<int> transpose(const std::vector<int> &data, int n, int m) {
+  std::vector<int> transposed(m * n);
+
+  for (int i =
0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + transposed[j * n + i] = data[i * m + j]; + } + } + + return transposed; +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Empty_Input_0) { + boost::mpi::communicator world; + const int N = 0; + const int M = 3; + + std::vector in(N * M, 0); + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(0); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), false); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Empty_Input_1) { + boost::mpi::communicator world; + const int N = 6; + const int M = 0; + + std::vector in{}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), false); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Wrong_Size_0) { + boost::mpi::communicator world; + const int N = -2; + const int M = 3; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), false); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Wrong_Size_1) { + boost::mpi::communicator world; + const int N = 2; + const int M = 312; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + 
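+    // Negative case: the buffer holds 6 elements while N * M == 2 * 312 =
+    // 624, so validation() must reject the mismatched dimensions below.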
taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), false); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Identity_Matrix) { + boost::mpi::communicator world; + const int N = 1; + const int M = 1; + + std::vector in{10}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + std::vector gold{10}; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(gold, out); + } +} +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_0) { + boost::mpi::communicator world; + const int N = 2; + const int M = 3; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + const std::vector gold{-1, -100, 2}; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(gold, out); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_1) { + boost::mpi::communicator world; + const int N = 100; + const int M = 100; + + std::vector in; + std::vector tr; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = getRandomVector(N * M); + tr = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(tr.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + 
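+    // The parallel task works on a column-major copy: transpose() stores
+    // column c contiguously at tr[c * N .. c * N + N - 1], so whole columns
+    // can be shipped to a process as one contiguous block. The sequential
+    // reference below keeps the row-major `in`, where element (r, c) sits at
+    // in[r * M + c].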
taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference(M, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference)); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference, out); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_2) { + boost::mpi::communicator world; + const int N = 43; + const int M = 563; + + std::vector in; + std::vector tr; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = getRandomVector(N * M); + tr = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(tr.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference(M, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference)); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference, out); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_3) { + boost::mpi::communicator world; + const int N = 908; + const int M = 
510; + + std::vector in; + std::vector tr; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = getRandomVector(N * M); + tr = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(tr.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference(M, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference)); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference, out); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_4) { + boost::mpi::communicator world; + const int N = 1; + const int M = 1000; + + std::vector in; + std::vector tr; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = getRandomVector(N * M); + tr = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(tr.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference(M, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + 
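+    // API quirk worth noting: the sequential task is given the address of
+    // the std::vector object itself (&in), matching its pre_processing(),
+    // which casts inputs[0] back to a vector pointer; the parallel task
+    // instead receives the raw element buffer (tr.data()).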
taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference)); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference, out); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, Test_Base_5) { + boost::mpi::communicator world; + const int N = 1000; + const int M = 1000; + + std::vector in; + std::vector tr; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = getRandomVector(N * M); + tr = transpose(in, N, M); + taskDataPar->inputs.emplace_back(reinterpret_cast(tr.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference(M, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference)); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference, out); + } +} \ No newline at end of file diff --git a/tasks/mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp new file mode 100644 index 00000000000..256696808fa --- /dev/null +++ b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace beresnev_a_min_values_by_matrix_columns_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int n_{}, m_{}; + std::vector input_, res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit 
TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector local_input_; + std::vector local_mins_; + std::vector global_mins_; + int n_, m_; + int col_on_pr; + int remainder; + boost::mpi::communicator world; + boost::mpi::request req; +}; + +} // namespace beresnev_a_min_values_by_matrix_columns_mpi \ No newline at end of file diff --git a/tasks/mpi/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp new file mode 100644 index 00000000000..81aaff02bd0 --- /dev/null +++ b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp @@ -0,0 +1,110 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp" + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, test_pipeline_run) { + boost::mpi::communicator world; + const int N = 1000; + const int M = 1000; + + std::vector in; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + std::vector gold(M, 0); + gold[0] = -100; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(N * M, 0); + in[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out, gold); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_mpi, test_task_run) { + boost::mpi::communicator world; + const int N = 1000; + const int M = 1000; + + std::vector in; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + std::vector gold(M, 0); + gold[0] = -100; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + in = std::vector(N * M, 0); + in[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataPar->inputs_count.emplace_back(n.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataPar->inputs_count.emplace_back(m.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + 
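+    // Expected output: the matrix is all zeros except in[0] = -100 in column
+    // 0, so gold is {-100, 0, ..., 0} with M - 1 trailing zeros.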
taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out, gold); + } +} \ No newline at end of file diff --git a/tasks/mpi/beresnev_a_min_values_by_matrix_columns/src/ops_mpi.cpp b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/src/ops_mpi.cpp new file mode 100644 index 00000000000..919f1af9c5d --- /dev/null +++ b/tasks/mpi/beresnev_a_min_values_by_matrix_columns/src/ops_mpi.cpp @@ -0,0 +1,132 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/beresnev_a_min_values_by_matrix_columns/include/ops_mpi.hpp" + +#include +#include +#include + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = reinterpret_cast*>(taskData->inputs[0])[0]; + res_ = reinterpret_cast*>(taskData->outputs[0])[0]; + n_ = reinterpret_cast(taskData->inputs[1])[0]; + m_ = reinterpret_cast(taskData->inputs[2])[0]; + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && + taskData->inputs_count[0] == reinterpret_cast*>(taskData->inputs[0])[0].size() && + taskData->inputs_count[0] == static_cast(reinterpret_cast(taskData->inputs[1])[0]) * + static_cast(reinterpret_cast(taskData->inputs[2])[0]) && + taskData->outputs_count[0] == reinterpret_cast*>(taskData->outputs[0])[0].size(); +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (int i = 0; i < m_; i++) { + int min = input_[i]; + for (int j = 1; j < n_; j++) { + if (input_[j * m_ + i] < min) { + min = input_[j * m_ + i]; + } + } + res_[i] = min; + } + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast*>(taskData->outputs[0])[0] = res_; + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + n_ = reinterpret_cast(taskData->inputs[1])[0]; + m_ = reinterpret_cast(taskData->inputs[2])[0]; + col_on_pr = m_ / world.size(); + remainder = m_ % world.size(); + int total_elements = taskData->inputs_count[0]; + input_.resize(total_elements); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < total_elements; i++) { + input_[i] = tmp_ptr[i]; + } + } + + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] > 0 && + taskData->inputs_count[0] == static_cast(reinterpret_cast(taskData->inputs[1])[0]) * + 
static_cast(reinterpret_cast(taskData->inputs[2])[0]) && + taskData->outputs_count[0] == (uint32_t) reinterpret_cast(taskData->inputs[2])[0] && + taskData->inputs_count[1] == 1 && taskData->inputs_count[2] == 1; + } + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + req = world.isend(proc, 0, input_.data() + (col_on_pr * proc + remainder) * n_, n_ * col_on_pr); + } + } + + broadcast(world, col_on_pr, 0); + broadcast(world, remainder, 0); + broadcast(world, n_, 0); + broadcast(world, m_, 0); + + local_input_ = std::vector(n_ * col_on_pr); + local_mins_ = std::vector(col_on_pr); + global_mins_ = std::vector(m_, 0); + + if (world.rank() == 0) { + req.wait(); + local_input_ = std::vector(input_.begin(), input_.begin() + (col_on_pr + remainder) * n_); + local_mins_.resize(col_on_pr + remainder); + } else { + world.recv(0, 0, local_input_.data(), local_input_.size()); + } + + if (world.rank() == 0) { + for (int i = 0; i < col_on_pr + remainder; i++) { + local_mins_[i] = *std::min_element(local_input_.begin() + n_ * i, local_input_.begin() + n_ * (i + 1)); + } + } else { + for (int i = 0; i < col_on_pr; i++) { + local_mins_[i] = *std::min_element(local_input_.begin() + n_ * i, local_input_.begin() + n_ * (i + 1)); + } + } + + std::vector sizes(world.size(), col_on_pr); + sizes[0] += remainder; + + boost::mpi::gatherv(world, local_mins_, global_mins_.data(), sizes, 0); + + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + auto* output_ptr = reinterpret_cast(taskData->outputs[0]); + for (size_t j = 0; j < global_mins_.size(); j++) { + output_ptr[j] = global_mins_[j]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp b/tasks/seq/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp new file mode 100644 index 00000000000..5fe69e2df6e --- /dev/null +++ b/tasks/seq/beresnev_a_min_values_by_matrix_columns/func_tests/main.cpp @@ -0,0 +1,361 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp" + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Empty_Input_0) { + const int N = 0; + const int M = 3; + + std::vector in(N * M, 0); + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(0); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Empty_Input_1) { + const int N = 6; + const int M = 0; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
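// Layout every test in this file relies on (sketch): the matrix is flattened
// row-major as N rows by M columns, so column i occupies in[j * M + i] for
// j = 0..N-1, and the expected output is the per-column minimum:
//   for (int i = 0; i < M; ++i) {
//     int mn = in[i];
//     for (int j = 1; j < N; ++j) mn = std::min(mn, in[j * M + i]);
//     // run() is expected to leave mn in out[i]
//   }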
taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Wrong_Size_0) { + const int N = -2; + const int M = 3; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Wrong_Size_1) { + const int N = 2; + const int M = 312; + + std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Identity_Matrix) { + const int N = 1; + const int M = 1; + + std::vector in{10}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + std::vector gold{10}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(gold, out); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_0) { + const int N = 2; + const int M = 3; + + 
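// Worked 2 x 3 example for the data below: the rows are {10, 1, 2} and
// {-1, -100, 2}, so the column minima are min(10, -1) = -1,
// min(1, -100) = -100 and min(2, 2) = 2 -- exactly the `gold` vector this
// test asserts against.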
std::vector in{10, 1, 2, -1, -100, 2}; + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + const std::vector gold{-1, -100, 2}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(gold, out); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_1) { + const int N = 100; + const int M = 100; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (int i = 0; i < N * M; ++i) { + in[i] = std::rand() % 200 - 100; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + for (int i = 0; i < M; ++i) { + int expectedMin = in[i]; + for (int j = 1; j < N; ++j) { + int currentValue = in[j * M + i]; + if (currentValue < expectedMin) { + expectedMin = currentValue; + } + } + ASSERT_EQ(out[i], expectedMin); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_2) { + const int N = 10000; + const int M = 1; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (int i = 0; i < N * M; ++i) { + in[i] = std::rand() % 200 - 100; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + int expectedMin = in[0]; + for (int j = 1; j < N; ++j) { + int currentValue = in[j * M]; + if (currentValue < expectedMin) { + expectedMin = 
currentValue; + } + } + ASSERT_EQ(out[0], expectedMin); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_3) { + const int N = 1; + const int M = 10000; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (int i = 0; i < N * M; ++i) { + in[i] = std::rand() % 200 - 100; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(in, out); +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_4) { + const std::uint32_t N = 332; + const std::uint32_t M = 875; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (std::uint32_t i = 0; i < N * M; ++i) { + in[i] = std::rand() % 2000 - 1000; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + for (std::uint32_t i = 0; i < M; ++i) { + int expectedMin = in[i]; + for (std::uint32_t j = 1; j < N; ++j) { + int currentValue = in[j * M + i]; + if (currentValue < expectedMin) { + expectedMin = currentValue; + } + } + ASSERT_EQ(out[i], expectedMin); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, Test_Base_5) { + const std::uint32_t N = 9271; + const std::uint32_t M = 682; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (std::uint32_t i = 0; i < N * M; ++i) { + in[i] = std::rand() % 2000 - 1000; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential 
testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + for (std::uint32_t i = 0; i < M; ++i) { + int expectedMin = in[i]; + for (std::uint32_t j = 1; j < N; ++j) { + int currentValue = in[j * M + i]; + if (currentValue < expectedMin) { + expectedMin = currentValue; + } + } + ASSERT_EQ(out[i], expectedMin); + } +} \ No newline at end of file diff --git a/tasks/seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp b/tasks/seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp new file mode 100644 index 00000000000..b67710ef89f --- /dev/null +++ b/tasks/seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace beresnev_a_min_values_by_matrix_columns_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int n_{}, m_{}; + std::vector input_, res_; +}; + +} // namespace beresnev_a_min_values_by_matrix_columns_seq \ No newline at end of file diff --git a/tasks/seq/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp b/tasks/seq/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp new file mode 100644 index 00000000000..036d0cad15f --- /dev/null +++ b/tasks/seq/beresnev_a_min_values_by_matrix_columns/perf_tests/main.cpp @@ -0,0 +1,124 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp" + +TEST(beresnev_a_min_values_by_matrix_columns_seq, test_pipeline_run) { + const std::uint32_t N = 2000; + const std::uint32_t M = 10000; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (std::uint32_t i = 0; i < N * M; ++i) { + in[i] = std::rand() % 2000 - 1000; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (std::uint32_t i = 0; i < M; ++i) 
{ + int expectedMin = in[i]; + for (std::uint32_t j = 1; j < N; ++j) { + int currentValue = in[j * M + i]; + if (currentValue < expectedMin) { + expectedMin = currentValue; + } + } + ASSERT_EQ(out[i], expectedMin); + } +} + +TEST(beresnev_a_min_values_by_matrix_columns_seq, test_task_run) { + const std::uint32_t N = 2000; + const std::uint32_t M = 10000; + + std::srand(static_cast(std::time(nullptr))); + + std::vector in(N * M); + for (std::uint32_t i = 0; i < N * M; ++i) { + in[i] = std::rand() % 2000 - 1000; + } + + std::vector out(M, 0); + std::vector n(1, N); + std::vector m(1, M); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&in)); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(n.data())); + taskDataSeq->inputs_count.emplace_back(n.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(m.data())); + taskDataSeq->inputs_count.emplace_back(m.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (std::uint32_t i = 0; i < M; ++i) { + int expectedMin = in[i]; + for (std::uint32_t j = 1; j < N; ++j) { + int currentValue = in[j * M + i]; + if (currentValue < expectedMin) { + expectedMin = currentValue; + } + } + ASSERT_EQ(out[i], expectedMin); + } +} \ No newline at end of file diff --git a/tasks/seq/beresnev_a_min_values_by_matrix_columns/src/ops_seq.cpp b/tasks/seq/beresnev_a_min_values_by_matrix_columns/src/ops_seq.cpp new file mode 100644 index 00000000000..40362a2c278 --- /dev/null +++ b/tasks/seq/beresnev_a_min_values_by_matrix_columns/src/ops_seq.cpp @@ -0,0 +1,42 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/beresnev_a_min_values_by_matrix_columns/include/ops_seq.hpp" + +bool beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = reinterpret_cast*>(taskData->inputs[0])[0]; + res_ = reinterpret_cast*>(taskData->outputs[0])[0]; + n_ = reinterpret_cast(taskData->inputs[1])[0]; + m_ = reinterpret_cast(taskData->inputs[2])[0]; + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && + taskData->inputs_count[0] == reinterpret_cast*>(taskData->inputs[0])[0].size() && + taskData->inputs_count[0] == static_cast(reinterpret_cast(taskData->inputs[1])[0]) * + static_cast(reinterpret_cast(taskData->inputs[2])[0]) && + taskData->outputs_count[0] == reinterpret_cast*>(taskData->outputs[0])[0].size(); +} + +bool beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential::run() { + internal_order_test(); + for (int i = 0; i < m_; 
i++) { + int min = input_[i]; + for (int j = 1; j < n_; j++) { + if (input_[j * m_ + i] < min) { + min = input_[j * m_ + i]; + } + } + res_[i] = min; + } + return true; +} + +bool beresnev_a_min_values_by_matrix_columns_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast*>(taskData->outputs[0])[0] = res_; + return true; +} From c10dd8cecb29dc6ab37e80f290757dba899a82e6 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Wed, 6 Nov 2024 08:33:50 +0800 Subject: [PATCH 114/155] =?UTF-8?q?Revert=20"=D0=9E=D0=B4=D0=B8=D0=BD?= =?UTF-8?q?=D1=86=D0=BE=D0=B2=20=D0=9C=D0=B8=D1=85=D0=B0=D0=B8=D0=BB.=20?= =?UTF-8?q?=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2027=20=D0=9F=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0?= =?UTF-8?q?=20=D0=BD=D0=B5=D1=81=D0=BE=D0=B2=D0=BF=D0=B0=D0=B4=D0=B0=D1=8E?= =?UTF-8?q?=D1=89=D0=B8=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82=D1=80=D0=BE?= =?UTF-8?q?=D0=BA"=20(#226)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#131 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11684087102/job/32534595745 image --- .../func_tests/main.cpp | 263 ------------------ .../include/ops_mpi.hpp | 40 --- .../perf_tests/main.cpp | 90 ------ .../src/ops_mpi.cpp | 137 --------- .../func_tests/main.cpp | 108 ------- .../include/ops_seq.hpp | 23 -- .../perf_tests/main.cpp | 88 ------ .../src/ops_seq.cpp | 48 ---- 8 files changed, 797 deletions(-) delete mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp delete mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp delete mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp delete mode 100644 tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp delete mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp delete mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp delete mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp delete mode 100644 tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp deleted file mode 100644 index d091ad4dd05..00000000000 --- a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp +++ /dev/null @@ -1,263 +0,0 @@ - -#include - -#include -#include -#include - -#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" - -std::string get_random_str(size_t sz) { - const char characters[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrswxyz0123456789"; - std::string str; - - std::srand(std::time(nullptr)); - - for (size_t i = 0; i < sz; ++i) { - // Генерируем случайный индекс - int index = std::rand() % (sizeof(characters) - 1); - str = characters[index]; - } - - return str; -} -TEST(Parallel_MPI_count, sz_0) { - // Create data// - boost::mpi::communicator com; - char str1[] = ""; - char str2[] = ""; - std::vector in{str1, str2}; - std::vector out(1, 1); - std::vector out_s(1, 1); - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - 
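// Behaviour of get_random_str above, as written: the loop assigns
// (str = characters[index]) rather than appending, so the call returns a
// single random character for any sz, and std::srand is reseeded per call.
// A string of the requested length would need str += characters[index] with
// one-time seeding; the parallel-vs-sequential assertions below still hold
// either way, because both versions receive identical inputs.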
taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} - -TEST(Parallel_MPI_count, sz_1) { - // Create data// - boost::mpi::communicator com; - std::string s1 = get_random_str(1); - std::string s2 = get_random_str(1); - std::vector in{s1.data(), s2.data()}; - std::vector out(1, 1); - std::vector out_s(1, 1); - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} -TEST(Parallel_MPI_count, sz_36) { - // Create data// - boost::mpi::communicator com; - std::string s1 = get_random_str(36); - std::string s2 = get_random_str(36); - std::vector in{s1.data(), s2.data()}; - std::vector out(1, 1); - std::vector out_s(1, 1); - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - 
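// Mismatch metric both task versions implement (see src/ops_mpi.cpp further
// below): each shared position where the strings differ adds 2, and every
// character of the longer string past the shorter one's end adds 1. Sketch
// over std::string operands a and b (names illustrative):
//   size_t n = std::min(a.size(), b.size());
//   int count = 0;
//   for (size_t k = 0; k < n; ++k)
//     if (a[k] != b[k]) count += 2;
//   count += static_cast<int>((a.size() - n) + (b.size() - n));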
testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} - -TEST(Parallel_MPI_count, sz_24) { - // Create data - boost::mpi::communicator com; - std::vector out_s(1, 1); - std::string s1 = get_random_str(24); - std::string s2 = get_random_str(24); - - std::vector in{s1.data(), s2.data()}; - std::vector out(1, 1); - - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} -TEST(Parallel_MPI_count, df_sz_15) { - // Create data// - boost::mpi::communicator com; - std::string s1 = get_random_str(12); - std::string s2 = get_random_str(12); - - std::vector in{s1.data(), s2.data()}; - std::vector out(1, 1); - std::vector out_s(1, 1); - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - 
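// Chunk sizing used by the parallel run() (sketch of the formula from
// src/ops_mpi.cpp below): the root computes
//   loc_size = (strlen(input[0]) + world.size() - 1) / world.size();
// i.e. ceil(len / p), so every rank gets a non-empty slot even when the
// string is shorter than the process count; len = 12 with p = 5 gives 3.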
taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} -TEST(Parallel_MPI_count, df_sz_25) { - // Create data// - boost::mpi::communicator com; - std::string s1 = get_random_str(13); - std::string s2 = get_random_str(12); - - std::vector in{s1.data(), s2.data()}; - std::vector out(1, 1); - std::vector out_s(1, 1); - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPIParallel testClassPar(taskDataPar); - ASSERT_EQ(testClassPar.validation(), true); - testClassPar.pre_processing(); - testClassPar.run(); - testClassPar.post_processing(); - - if (com.rank() == 0) { - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out_s.data())); - taskDataSeq->outputs_count.emplace_back(out_s.size()); - Odintsov_M_CountingMismatchedCharactersStr_mpi::CountingCharacterMPISequential testClassSeq(taskDataSeq); - ASSERT_EQ(testClassSeq.validation(), true); - testClassSeq.pre_processing(); - testClassSeq.run(); - testClassSeq.post_processing(); - ASSERT_EQ(out[0], out_s[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp deleted file mode 100644 index 61b8a46297b..00000000000 --- a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp +++ /dev/null @@ -1,40 +0,0 @@ - -#pragma once - -#include -#include -#include -#include - -#include "core/task/include/task.hpp" -namespace Odintsov_M_CountingMismatchedCharactersStr_mpi { - -class CountingCharacterMPISequential : public ppc::core::Task { - public: - explicit CountingCharacterMPISequential(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input; - int ans{}; -}; - -class CountingCharacterMPIParallel : public ppc::core::Task { - public: - explicit CountingCharacterMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector local_input; - std::vector input; - int ans{}; - boost::mpi::communicator com; -}; -} // namespace Odintsov_M_CountingMismatchedCharactersStr_mpi \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp deleted file mode 100644 index 
fea00e10845..00000000000 --- a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp +++ /dev/null @@ -1,90 +0,0 @@ -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" - -TEST(MPI_parallel_perf_test, my_test_pipeline_run) { - boost::mpi::communicator com; - char str1[] = "qbrkyndjjobh"; - char str2[] = "qellowhwmvpt"; - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create Task Data Parallel - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - // Create Task - auto testClassPar = - std::make_shared(taskDataPar); - ASSERT_EQ(testClassPar->validation(), true); - testClassPar->pre_processing(); - testClassPar->run(); - testClassPar->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testClassPar); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (com.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(10, out[0]); - } -} -TEST(MPI_parallel_perf_test, my_test_task_run) { - boost::mpi::communicator com; - char str1[] = "qbrkyndjjobh"; - char str2[] = "qellowhwmvpt"; - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create Task Data Parallel// - std::shared_ptr taskDataPar = std::make_shared(); - if (com.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataPar->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataPar->inputs_count.emplace_back(in.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataPar->outputs_count.emplace_back(out.size()); - } - // Create Task - auto testClassPar = - std::make_shared(taskDataPar); - ASSERT_EQ(testClassPar->validation(), true); - testClassPar->pre_processing(); - testClassPar->run(); - testClassPar->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testClassPar); - perfAnalyzer->task_run(perfAttr, perfResults); - if (com.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(10, out[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp b/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp deleted file mode 100644 index a6952bdfa5a..00000000000 --- a/tasks/mpi/Odintsov_M_CountingMismatchedCharactersStr/src/ops_mpi.cpp +++ /dev/null @@ -1,137 +0,0 @@ - -#include "mpi/Odintsov_M_CountingMismatchedCharactersStr/include/ops_mpi.hpp" - -#include -#include -#include -#include - -using namespace std::chrono_literals; -using namespace 
Odintsov_M_CountingMismatchedCharactersStr_mpi; - -// Последовательная версия -bool CountingCharacterMPISequential::validation() { - internal_order_test(); - // Проверка на то, что у нас 2 строки на входе и одно число на выходе - bool ans_out = (taskData->inputs_count[0] == 2); - bool ans_in = (taskData->outputs_count[0] == 1); - return (ans_in) && (ans_out); -} -bool CountingCharacterMPISequential::pre_processing() { - internal_order_test(); - // инициализация инпута - if (strlen(reinterpret_cast(taskData->inputs[0])) >= strlen(reinterpret_cast(taskData->inputs[1]))) { - input.push_back(reinterpret_cast(taskData->inputs[0])); - input.push_back(reinterpret_cast(taskData->inputs[1])); - } else { - input.push_back(reinterpret_cast(taskData->inputs[1])); - input.push_back(reinterpret_cast(taskData->inputs[0])); - } - // Инициализация ответа - ans = 0; - return true; -} -bool CountingCharacterMPISequential::run() { - internal_order_test(); - auto *it1 = input[0]; - auto *it2 = input[1]; - while (*it1 != '\0' && *it2 != '\0') { - if (*it1 != *it2) { - ans += 2; - } - ++it1; - ++it2; - } - ans += std::strlen(it1) + std::strlen(it2); - return true; -} -bool CountingCharacterMPISequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = ans; - return true; -} -// Параллельная версия -bool CountingCharacterMPIParallel::validation() { - internal_order_test(); - // Проверка на то, что у нас 2 строки на входе и одно число на выходе - if (com.rank() == 0) { - bool ans_out = (taskData->inputs_count[0] == 2); - bool ans_in = (taskData->outputs_count[0] == 1); - return (ans_in) && (ans_out); - } - return true; -} - -bool CountingCharacterMPIParallel::pre_processing() { - internal_order_test(); - if (com.rank() == 0) { - // инициализация инпута - if (strlen(reinterpret_cast(taskData->inputs[0])) >= - strlen(reinterpret_cast(taskData->inputs[1]))) { - input.push_back(reinterpret_cast(taskData->inputs[0])); - input.push_back(reinterpret_cast(taskData->inputs[1])); - } else { - input.push_back(reinterpret_cast(taskData->inputs[1])); - input.push_back(reinterpret_cast(taskData->inputs[0])); - } - // Слчай если строки разной длины - if (strlen(input[0]) != (strlen(input[1]))) { - ans = strlen(input[0]) - strlen(input[1]); - input[0][strlen(input[1])] = '\0'; - } else { - ans = 0; - } - } - return true; -} -bool CountingCharacterMPIParallel::run() { - internal_order_test(); - // Пересылка - size_t loc_size = 0; - // Инициализация в 0 поток - if (com.rank() == 0) { - // Инициализация loc_size; - loc_size = (strlen(input[0]) + com.size() - 1) / - com.size(); // Округляем вверх, чтобы при большем количестве потоков loc_size = 1 - } - broadcast(com, loc_size, 0); - if (com.rank() == 0) { - for (int pr = 1; pr < com.size(); pr++) { - size_t send_size = - std::min(loc_size, strlen(input[0] - pr * loc_size)); // Ограничиваем размар отправляемых данных - com.send(pr, 0, input[0] + pr * loc_size, send_size); - com.send(pr, 0, input[1] + pr * loc_size, send_size); - } - } - if (com.rank() == 0) { - std::string str1(input[0], loc_size); - std::string str2(input[1], loc_size); - local_input.push_back(str1); - local_input.push_back(str2); - } else { - std::string str1('0', loc_size); - std::string str2('0', loc_size); - com.recv(0, 0, str1.data(), loc_size); - com.recv(0, 0, str2.data(), loc_size); - local_input.push_back(str1); - local_input.push_back(str2); - } - size_t size_1 = local_input[0].size(); - // Реализация - int loc_res = 0; - for (size_t i = 0; i < size_1; i++) { - if 
(local_input[0][i] != local_input[1][i]) { - loc_res += 2; - } - } - reduce(com, loc_res, ans, std::plus(), 0); - return true; -} - -bool CountingCharacterMPIParallel::post_processing() { - internal_order_test(); - if (com.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = ans; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp deleted file mode 100644 index 76bf51d46c3..00000000000 --- a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/func_tests/main.cpp +++ /dev/null @@ -1,108 +0,0 @@ - -#include - -#include - -#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" - -TEST(Sequential_count, ans_8) { - // Create data - - char str1[] = "qwert"; - char str2[] = "qello"; - - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData// - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); - ASSERT_EQ(testClass.validation(), true); - testClass.pre_processing(); - testClass.run(); - testClass.post_processing(); - - ASSERT_EQ(8, out[0]); -} - -TEST(Sequential_count, ans_0) { - // Create data - char str1[] = "qwert"; - char str2[] = "qwert"; - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); - ASSERT_EQ(testClass.validation(), true); - testClass.pre_processing(); - testClass.run(); - testClass.post_processing(); - - ASSERT_EQ(0, out[0]); -} -TEST(Sequential_count, ans_10) { - // Create data - char str1[] = "qwert"; - - char str2[] = "asdfg"; - - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); - ASSERT_EQ(testClass.validation(), true); - testClass.pre_processing(); - testClass.run(); - testClass.post_processing(); - - ASSERT_EQ(10, out[0]); -} -TEST(Sequential_count, ans_11) { - // Create data - char str1[] = "qwerta"; - char str2[] = "asdfg"; - - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - 
taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential testClass(taskDataSeq); - ASSERT_EQ(testClass.validation(), true); - testClass.pre_processing(); - testClass.run(); - testClass.post_processing(); - ASSERT_EQ(11, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp deleted file mode 100644 index db67e1d7825..00000000000 --- a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp +++ /dev/null @@ -1,23 +0,0 @@ - -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" -namespace Odintsov_M_CountingMismatchedCharactersStr_seq { - -class CountingCharacterSequential : public ppc::core::Task { - public: - explicit CountingCharacterSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input; - int ans{}; -}; - -} // namespace Odintsov_M_CountingMismatchedCharactersStr_seq \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp deleted file mode 100644 index 6a57c5a9b47..00000000000 --- a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/perf_tests/main.cpp +++ /dev/null @@ -1,88 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" -TEST(sequential_my_perf_test, my_test_pipeline_run) { - // Create data - char str1[] = "qwert"; - char str2[] = "qello"; - - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testClass = - std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 15; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testClass); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(8, out[0]); -} - -TEST(sequential_my_perf_test, test_task_run) { - char str1[] = "qwert"; - char str2[] = "qello"; - - std::vector in{str1, str2}; - std::vector out(1, 1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0])); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1])); - 
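// Timer convention for these perf tests (the same std::chrono pattern as the
// pipeline test above; sketch): current_timer reports seconds elapsed since
// a fixed start point:
//   const auto t0 = std::chrono::high_resolution_clock::now();
//   perfAttr->current_timer = [&] {
//     auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
//                   std::chrono::high_resolution_clock::now() - t0).count();
//     return static_cast<double>(ns) * 1e-9;  // nanoseconds -> seconds
//   };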
taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testClass = - std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 15; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testClass); - perfAnalyzer->task_run(perfAttr, perfResults); - - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(8, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp b/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp deleted file mode 100644 index 91571ced318..00000000000 --- a/tasks/seq/Odintsov_M_CountingMismatchedCharactersStr/src/ops_seq.cpp +++ /dev/null @@ -1,48 +0,0 @@ - -#include "seq/Odintsov_M_CountingMismatchedCharactersStr/include/ops_seq.hpp" - -#include -#include - -using namespace std::chrono_literals; - -bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::validation() { - internal_order_test(); - // Проверка на то, что у нас 2 строки на входе и одно число на выходе - bool ans_out = (taskData->inputs_count[0] == 2); - bool ans_in = (taskData->outputs_count[0] == 1); - return (ans_in) && (ans_out); -} -bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::pre_processing() { - internal_order_test(); - // инициализация инпута - if (strlen(reinterpret_cast(taskData->inputs[0])) >= strlen(reinterpret_cast(taskData->inputs[1]))) { - input.push_back(reinterpret_cast(taskData->inputs[0])); - input.push_back(reinterpret_cast(taskData->inputs[1])); - } else { - input.push_back(reinterpret_cast(taskData->inputs[1])); - input.push_back(reinterpret_cast(taskData->inputs[0])); - } - // Инициализация ответа - ans = 0; - return true; -} -bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::run() { - internal_order_test(); - auto *it1 = input[0]; - auto *it2 = input[1]; - while (*it1 != '\0' && *it2 != '\0') { - if (*it1 != *it2) { - ans += 2; - } - ++it1; - ++it2; - } - ans += std::strlen(it1) + std::strlen(it2); - return true; -} -bool Odintsov_M_CountingMismatchedCharactersStr_seq::CountingCharacterSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = ans; - return true; -} From 23b03e02c8e63f875d58d1ed3bdd85d8d43288bc Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Wed, 6 Nov 2024 08:56:05 +0800 Subject: [PATCH 115/155] =?UTF-8?q?Revert=20"=D0=9A=D0=BE=D0=BD=D0=B4?= =?UTF-8?q?=D1=80=D0=B0=D1=82=D1=8C=D0=B5=D0=B2=20=D0=AF=D1=80=D0=BE=D1=81?= =?UTF-8?q?=D0=BB=D0=B0=D0=B2.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.?= =?UTF-8?q?=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2016.=20=D0=9D?= =?UTF-8?q?=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC?= =?UTF-8?q?=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B?= =?UTF-8?q?=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20?= =?UTF-8?q?=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE=D0=BB=D0=B1=D1=86=D0=B0=D0=BC?= 
=?UTF-8?q?=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B"=20(#227)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#76 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11695056155/job/32569821733 image --- .../func_tests/main.cpp | 125 ------------- .../include/ops_mpi.hpp | 51 ------ .../perf_tests/main.cpp | 127 -------------- .../src/ops_mpi.cpp | 165 ------------------ .../func_tests/main.cpp | 107 ------------ .../include/ops_seq.hpp | 25 --- .../perf_tests/main.cpp | 117 ------------- .../src/ops_seq.cpp | 55 ------ 8 files changed, 772 deletions(-) delete mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp delete mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp delete mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp delete mode 100644 tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp delete mode 100644 tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp delete mode 100644 tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp delete mode 100644 tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp delete mode 100644 tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp deleted file mode 100644 index a76cc30f3da..00000000000 --- a/tasks/mpi/kondratev_ya_max_col_matrix/func_tests/main.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include -#include - -#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp" - -std::vector> getRandomMatrix(uint32_t row, uint32_t col) { - int32_t low = -200; - int32_t high = 200; - - std::random_device dev; - std::mt19937 gen(dev()); - std::vector> mtrx(row, std::vector(col)); - for (uint32_t i = 0; i < row; i++) { - for (uint32_t j = 0; j < col; j++) { - mtrx[i][j] = low + gen() % (high - low + 1); - } - } - return mtrx; -} - -void runTask(ppc::core::Task& task) { - ASSERT_TRUE(task.validation()); - task.pre_processing(); - task.run(); - task.post_processing(); -} - -void fillTaskData(std::shared_ptr& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) { - for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast(mtrxRow.data())); - taskData->inputs_count.emplace_back(row); - taskData->inputs_count.emplace_back(col); - taskData->outputs.emplace_back(reinterpret_cast(res.data())); - taskData->outputs_count.emplace_back(res.size()); -} - -TEST(kondratev_ya_max_col_matrix_mpi, test_1) { - uint32_t row = 100; - uint32_t col = 100; - - boost::mpi::communicator world; - std::vector res(col); - std::vector> mtrx; - - auto taskDataPar = std::make_shared(); - if (world.rank() == 0) { - mtrx = getRandomMatrix(row, col); - fillTaskData(taskDataPar, row, col, mtrx, res); - } - - kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - runTask(testMpiTaskParallel); - - if (world.rank() == 0) { - std::vector ref(col); - std::shared_ptr taskDataSeq = std::make_shared(); - fillTaskData(taskDataSeq, row, col, mtrx, ref); - - kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - runTask(testMpiTaskSequential); - - for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); - } -} - -TEST(kondratev_ya_max_col_matrix_mpi, test_2) { - uint32_t row = 1000; - uint32_t col = 50; - - 
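// Shape of the kondratev kernels being reverted (sketch): pre_processing()
// transposes the row x col input into per-column vectors, run() reduces each
// with
//   res_[i] = *std::max_element(input_[i].begin(), input_[i].end());
// and the MPI variant ships whole columns to ranks before the same reduction,
// which is why these tests compare the two results element-wise.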
boost::mpi::communicator world; - std::vector res(col); - std::vector> mtrx; - - auto taskDataPar = std::make_shared(); - if (world.rank() == 0) { - mtrx = getRandomMatrix(row, col); - fillTaskData(taskDataPar, row, col, mtrx, res); - } - - kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - runTask(testMpiTaskParallel); - if (world.rank() == 0) { - std::vector ref(col); - std::shared_ptr taskDataSeq = std::make_shared(); - fillTaskData(taskDataSeq, row, col, mtrx, ref); - - kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - runTask(testMpiTaskSequential); - - for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); - } -} - -TEST(kondratev_ya_max_col_matrix_mpi, test_3) { - uint32_t row = 500; - uint32_t col = 1000; - - boost::mpi::communicator world; - std::vector res(col); - std::vector> mtrx; - - auto taskDataPar = std::make_shared(); - if (world.rank() == 0) { - mtrx = getRandomMatrix(row, col); - fillTaskData(taskDataPar, row, col, mtrx, res); - } - - kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - runTask(testMpiTaskParallel); - - if (world.rank() == 0) { - std::vector ref(col); - std::shared_ptr taskDataSeq = std::make_shared(); - fillTaskData(taskDataSeq, row, col, mtrx, ref); - - kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - runTask(testMpiTaskSequential); - - for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]); - } -} \ No newline at end of file diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp b/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp deleted file mode 100644 index f5880d9ef44..00000000000 --- a/tasks/mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace kondratev_ya_max_col_matrix_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::vector res_; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::vector> local_input_; - std::vector res_; - - uint32_t row_; - uint32_t col_; - uint32_t step_; - uint32_t remain_; - boost::mpi::communicator world; -}; - -} // namespace kondratev_ya_max_col_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp deleted file mode 100644 index 2ac3a61ffed..00000000000 --- a/tasks/mpi/kondratev_ya_max_col_matrix/perf_tests/main.cpp +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp" - -std::vector> getRandomMatrix(uint32_t row, uint32_t col) { - int32_t low = -200; - int32_t high = 200; - - 
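// Random fill convention (sketch): low + gen() % (high - low + 1) lands in
// [low, high], here 401 distinct values. std::uniform_int_distribution would
// avoid the slight modulo bias of this form at the cost of a distribution
// object per fill; for generating test data the simpler form is adequate.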
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::vector<std::vector<int32_t>> mtrx(row, std::vector<int32_t>(col));
-  for (uint32_t i = 0; i < row; i++) {
-    for (uint32_t j = 0; j < col; j++) {
-      mtrx[i][j] = low + gen() % (high - low + 1);
-    }
-  }
-  return mtrx;
-}
-
-void insertRefValue(std::vector<std::vector<int32_t>>& mtrx, int32_t ref) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-
-  uint32_t ind;
-  uint32_t row = mtrx.size();
-  uint32_t col = mtrx[0].size();
-
-  for (uint32_t j = 0; j < col; j++) {
-    ind = gen() % row;
-    mtrx[ind][j] = ref;
-  }
-}
-
-void runTask(ppc::core::Task& task) {
-  ASSERT_TRUE(task.validation());
-  task.pre_processing();
-  task.run();
-  task.post_processing();
-}
-
-void fillTaskData(std::shared_ptr<ppc::core::TaskData>& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) {
-  for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast<uint8_t *>(mtrxRow.data()));
-  taskData->inputs_count.emplace_back(row);
-  taskData->inputs_count.emplace_back(col);
-  taskData->outputs.emplace_back(reinterpret_cast<uint8_t *>(res.data()));
-  taskData->outputs_count.emplace_back(res.size());
-}
-
-TEST(kondratev_ya_max_col_matrix_mpi, test_pipeline_run) {
-  uint32_t row = 6000;
-  uint32_t col = 6000;
-  int32_t ref_val = INT_MAX;
-
-  boost::mpi::communicator world;
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx;
-
-  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    mtrx = getRandomMatrix(row, col);
-    insertRefValue(mtrx, ref_val);
-    fillTaskData(taskDataPar, row, col, mtrx, res);
-  }
-
-  auto testMpiTaskParallel = std::make_shared<kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel>(taskDataPar);
-  runTask(*testMpiTaskParallel);
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const boost::mpi::timer current_timer;
-  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
-  perfAnalyzer->pipeline_run(perfAttr, perfResults);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
-
-TEST(kondratev_ya_max_col_matrix_mpi_perf_test, test_task_run) {
-  uint32_t row = 6000;
-  uint32_t col = 6000;
-  int32_t ref_val = INT_MAX;
-
-  boost::mpi::communicator world;
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx;
-
-  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    mtrx = getRandomMatrix(row, col);
-    insertRefValue(mtrx, ref_val);
-    fillTaskData(taskDataPar, row, col, mtrx, res);
-  }
-
-  auto testMpiTaskParallel = std::make_shared<kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel>(taskDataPar);
-  runTask(*testMpiTaskParallel);
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const boost::mpi::timer current_timer;
-  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
diff --git a/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp b/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp
deleted file mode 100644
index 313d0e034af..00000000000
--- a/tasks/mpi/kondratev_ya_max_col_matrix/src/ops_mpi.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include "mpi/kondratev_ya_max_col_matrix/include/ops_mpi.hpp"
-
-#include <algorithm>
-#include <functional>
-#include <random>
-#include <string>
-#include <thread>
-#include <vector>
-
-using namespace std::chrono_literals;
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::pre_processing() {
-  internal_order_test();
-
-  uint32_t row = taskData->inputs_count[0];
-  uint32_t col = taskData->inputs_count[1];
-
-  std::vector<int32_t *> tmp(row);
-  for (uint32_t i = 0; i < row; i++) {
-    tmp[i] = reinterpret_cast<int32_t *>(taskData->inputs[i]);
-  }
-
-  input_.resize(col, std::vector<int32_t>(row));
-  for (uint32_t j = 0; j < col; j++) {
-    for (uint32_t i = 0; i < row; i++) {
-      input_[j][i] = tmp[i][j];
-    }
-  }
-  res_.resize(col);
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::validation() {
-  internal_order_test();
-
-  return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() &&
-         !taskData->inputs.empty();
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::run() {
-  internal_order_test();
-
-  for (uint32_t i = 0; i < input_.size(); i++) {
-    res_[i] = *std::max_element(input_[i].begin(), input_[i].end());
-  }
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskSequential::post_processing() {
-  internal_order_test();
-
-  auto* output_matrix = reinterpret_cast<int32_t *>(taskData->outputs[0]);
-  std::copy(res_.begin(), res_.end(), output_matrix);
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::pre_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    row_ = taskData->inputs_count[0];
-    col_ = taskData->inputs_count[1];
-
-    std::vector<int32_t *> tmp(row_);
-    for (uint32_t i = 0; i < row_; i++) {
-      tmp[i] = reinterpret_cast<int32_t *>(taskData->inputs[i]);
-    }
-
-    input_.resize(col_, std::vector<int32_t>(row_));
-    for (uint32_t j = 0; j < col_; j++) {
-      for (uint32_t i = 0; i < row_; i++) {
-        input_[j][i] = tmp[i][j];
-      }
-    }
-    res_.resize(col_);
-  }
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::validation() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() &&
-           !taskData->inputs.empty();
-  }
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::run() {
-  internal_order_test();
-
-  broadcast(world, row_, 0);
-  broadcast(world, col_, 0);
-
-  step_ = col_ / world.size();
-  remain_ = col_ % world.size();
-
-  uint32_t recvSize = 0;
-
-  if (world.rank() == 0) {
-    uint32_t worldSize = world.size();
-    uint32_t ind = step_;
-    if (remain_ > 0) ind++;
-
-    for (uint32_t i = 1; i < worldSize; i++) {
-      recvSize = step_;
-      if (i < remain_) recvSize++;
-
-      for (uint32_t j = 0; j < recvSize; j++) {
-        world.send(i, 0, input_[ind++]);
-      }
-    }
-  }
-
-  recvSize = step_;
-  if (static_cast<uint32_t>(world.rank()) < remain_) recvSize++;
-  local_input_.resize(recvSize, std::vector<int32_t>(row_));
-
-  if (world.rank() == 0) {
-    std::copy(input_.begin(), input_.begin() + recvSize, local_input_.begin());
-  } else {
-    for (uint32_t i = 0; i < recvSize; i++) {
-      world.recv(0, 0, local_input_[i]);
-    }
-  }
-
-  std::vector<int32_t> loc_max(local_input_.size());
-  for (size_t i = 0; i < loc_max.size(); i++) {
-    loc_max[i] = *std::max_element(local_input_[i].begin(), local_input_[i].end());
-  }
-
-  if (world.rank() == 0) {
-    std::copy(loc_max.begin(), loc_max.end(), res_.begin());
-
-    std::vector<uint32_t> sizes(world.size(), step_);
-    for (uint32_t i = 0; i < remain_; i++) sizes[i]++;
-
-    uint32_t ind = sizes[0];
-    for (int32_t i = 1; i < world.size(); i++) {
-      world.recv(i, 0, &res_[ind], sizes[i]);
-      ind += sizes[i];
-    }
-  } else {
-    world.send(0, 0, loc_max.data(), loc_max.size());
-  }
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_mpi::TestMPITaskParallel::post_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    auto* output = reinterpret_cast<int32_t *>(taskData->outputs[0]);
-    std::copy(res_.begin(), res_.end(), output);
-  }
-
-  return true;
-}
diff --git a/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp b/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp
deleted file mode 100644
index c7897ca095f..00000000000
--- a/tasks/seq/kondratev_ya_max_col_matrix/func_tests/main.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <random>
-#include <vector>
-
-#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp"
-
-std::vector<std::vector<int32_t>> getRandomMatrix(uint32_t row, uint32_t col) {
-  int32_t low = -200;
-  int32_t high = 200;
-
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::vector<std::vector<int32_t>> mtrx(row, std::vector<int32_t>(col));
-  for (uint32_t i = 0; i < row; i++) {
-    for (uint32_t j = 0; j < col; j++) {
-      mtrx[i][j] = low + gen() % (high - low + 1);
-    }
-  }
-  return mtrx;
-}
-
-void insertRefValue(std::vector<std::vector<int32_t>>& mtrx, int32_t ref) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-
-  uint32_t ind;
-  uint32_t row = mtrx.size();
-  uint32_t col = mtrx[0].size();
-
-  for (uint32_t j = 0; j < col; j++) {
-    ind = gen() % row;
-    mtrx[ind][j] = ref;
-  }
-}
-
-void runTask(ppc::core::Task& task) {
-  ASSERT_TRUE(task.validation());
-  task.pre_processing();
-  task.run();
-  task.post_processing();
-}
-
-void fillTaskData(std::shared_ptr<ppc::core::TaskData>& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) {
-  for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast<uint8_t *>(mtrxRow.data()));
-  taskData->inputs_count.emplace_back(row);
-  taskData->inputs_count.emplace_back(col);
-  taskData->outputs.emplace_back(reinterpret_cast<uint8_t *>(res.data()));
-  taskData->outputs_count.emplace_back(res.size());
-}
-
-TEST(kondratev_ya_max_col_matrix_seq, test_1) {
-  uint32_t row = 100;
-  uint32_t col = 100;
-  int32_t ref_val = INT_MAX;
-
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx = getRandomMatrix(row, col);
-  insertRefValue(mtrx, ref_val);
-
-  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  fillTaskData(taskDataSeq, row, col, mtrx, res);
-
-  kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  runTask(testTaskSequential);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
-
-TEST(kondratev_ya_max_col_matrix_seq, test_2) {
-  uint32_t row = 1000;
-  uint32_t col = 50;
-  int32_t ref_val = INT_MAX;
-
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx = getRandomMatrix(row, col);
-  insertRefValue(mtrx, ref_val);
-
-  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  fillTaskData(taskDataSeq, row, col, mtrx, res);
-
-  kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  runTask(testTaskSequential);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
-
-TEST(kondratev_ya_max_col_matrix_seq, test_3) {
-  uint32_t row = 500;
-  uint32_t col = 1000;
-  int32_t ref_val = INT_MAX;
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx = getRandomMatrix(row, col);
-  insertRefValue(mtrx, ref_val);
-
-  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  fillTaskData(taskDataSeq, row, col, mtrx, res);
-
-  kondratev_ya_max_col_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  runTask(testTaskSequential);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
\ No newline at end of file
diff --git a/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp b/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp
deleted file mode 100644
index e37ed3478b4..00000000000
--- a/tasks/seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "core/task/include/task.hpp"
-
-namespace kondratev_ya_max_col_matrix_seq {
-
-class TestTaskSequential : public ppc::core::Task {
- public:
-  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
-  bool pre_processing() override;
-  bool validation() override;
-  bool run() override;
-  bool post_processing() override;
-
- private:
-  std::vector<std::vector<int32_t>> input_;
-  std::vector<int32_t> res_;
-};
-
-}  // namespace kondratev_ya_max_col_matrix_seq
\ No newline at end of file
diff --git a/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp b/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp
deleted file mode 100644
index da20f13454b..00000000000
--- a/tasks/seq/kondratev_ya_max_col_matrix/perf_tests/main.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <random>
-#include <vector>
-
-#include "core/perf/include/perf.hpp"
-#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp"
-
-std::vector<std::vector<int32_t>> getRandomMatrix(uint32_t row, uint32_t col) {
-  int32_t low = -200;
-  int32_t high = 200;
-
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::vector<std::vector<int32_t>> mtrx(row, std::vector<int32_t>(col));
-  for (uint32_t i = 0; i < row; i++) {
-    for (uint32_t j = 0; j < col; j++) {
-      mtrx[i][j] = low + gen() % (high - low + 1);
-    }
-  }
-  return mtrx;
-}
-
-void insertRefValue(std::vector<std::vector<int32_t>>& mtrx, int32_t ref) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-
-  uint32_t ind;
-  uint32_t row = mtrx.size();
-  uint32_t col = mtrx[0].size();
-
-  for (uint32_t j = 0; j < col; j++) {
-    ind = gen() % row;
-    mtrx[ind][j] = ref;
-  }
-}
-
-void fillTaskData(std::shared_ptr<ppc::core::TaskData>& taskData, uint32_t row, uint32_t col, auto& mtrx, auto& res) {
-  for (auto& mtrxRow : mtrx) taskData->inputs.emplace_back(reinterpret_cast<uint8_t *>(mtrxRow.data()));
-  taskData->inputs_count.emplace_back(row);
-  taskData->inputs_count.emplace_back(col);
-  taskData->outputs.emplace_back(reinterpret_cast<uint8_t *>(res.data()));
-  taskData->outputs_count.emplace_back(res.size());
-}
-
-TEST(kondratev_ya_max_col_matrix_seq, test_pipeline_run) {
-  uint32_t row = 6000;
-  uint32_t col = 6000;
-  int32_t ref_val = INT_MAX;
-
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx = getRandomMatrix(row, col);
-  insertRefValue(mtrx, ref_val);
-
-  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  fillTaskData(taskDataSeq, row, col, mtrx, res);
-
-  auto testTaskSequential = std::make_shared<kondratev_ya_max_col_matrix_seq::TestTaskSequential>(taskDataSeq);
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;  // Set the number of runs as needed
-  const auto t0 = std::chrono::high_resolution_clock::now();
-  perfAttr->current_timer = [&] {
-    auto current_time_point = std::chrono::high_resolution_clock::now();
-    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
-    return static_cast<double>(duration) * 1e-9;
-  };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
-  perfAnalyzer->pipeline_run(perfAttr, perfResults);
-  ppc::core::Perf::print_perf_statistic(perfResults);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
-
-TEST(kondratev_ya_max_col_matrix_seq, test_task_run) {
-  uint32_t row = 6000;
-  uint32_t col = 6000;
-  int32_t ref_val = INT_MAX;
-
-  std::vector<int32_t> res(col);
-  std::vector<int32_t> ref(col, ref_val);
-  std::vector<std::vector<int32_t>> mtrx = getRandomMatrix(row, col);
-  insertRefValue(mtrx, ref_val);
-
-  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  fillTaskData(taskDataSeq, row, col, mtrx, res);
-
-  auto testTaskSequential = std::make_shared<kondratev_ya_max_col_matrix_seq::TestTaskSequential>(taskDataSeq);
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const auto t0 = std::chrono::high_resolution_clock::now();
-  perfAttr->current_timer = [&] {
-    auto current_time_point = std::chrono::high_resolution_clock::now();
-    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
-    return static_cast<double>(duration) * 1e-9;
-  };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-  ppc::core::Perf::print_perf_statistic(perfResults);
-
-  for (uint32_t i = 0; i < res.size(); i++) ASSERT_EQ(res[i], ref[i]);
-}
diff --git a/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp b/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp
deleted file mode 100644
index 84103c88d91..00000000000
--- a/tasks/seq/kondratev_ya_max_col_matrix/src/ops_seq.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2024 Nesterov Alexander
-#include "seq/kondratev_ya_max_col_matrix/include/ops_seq.hpp"
-
-#include <algorithm>
-#include <thread>
-
-using namespace std::chrono_literals;
-
-bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::pre_processing() {
-  internal_order_test();
-
-  uint32_t row = taskData->inputs_count[0];
-  uint32_t col = taskData->inputs_count[1];
-
-  std::vector<int32_t *> tmp(row);
-  for (uint32_t i = 0; i < row; i++) {
-    tmp[i] = reinterpret_cast<int32_t *>(taskData->inputs[i]);
-  }
-
-  input_.resize(col, std::vector<int32_t>(row));
-  for (uint32_t j = 0; j < col; j++) {
-    for (uint32_t i = 0; i < row; i++) {
-      input_[j][i] = tmp[i][j];
-    }
-  }
-  res_.resize(col);
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::validation() {
-  internal_order_test();
-
-  return taskData->outputs_count[0] == taskData->inputs_count[1] && !taskData->outputs.empty() &&
-         !taskData->inputs.empty();
-}
-
-bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::run() {
-  internal_order_test();
-
-  for (uint32_t i = 0; i < input_.size(); i++) {
-    res_[i] = *std::max_element(input_[i].begin(), input_[i].end());
-  }
-
-  return true;
-}
-
-bool kondratev_ya_max_col_matrix_seq::TestTaskSequential::post_processing() {
-  internal_order_test();
-
-  auto* output_matrix = reinterpret_cast<int32_t *>(taskData->outputs[0]);
-  std::copy(res_.begin(), res_.end(), output_matrix);
-
-  return true;
-}
From 0e270de92f64b4f33f11287f66983d66bd8834ae Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Wed, 6 Nov 2024 09:06:02 +0800
Subject: Revert "Kurakin Matvey. Task 1. Variant 17. Finding the minimum
 values in the rows of a matrix." (#228)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#127

https://github.com/learning-process/ppc-2024-autumn/actions/runs/11695174308/job/32570137631

---
 .../func_tests/main.cpp | 555 ------------------
 .../include/ops_mpi.hpp |  51 --
 .../perf_tests/main.cpp | 104 ----
 .../src/ops_mpi.cpp     | 141 -----
 .../func_tests/main.cpp | 131 -----
 .../include/ops_seq.hpp |  26 -
 .../perf_tests/main.cpp | 100 ----
 .../src/ops_seq.cpp     |  46 --
 8 files changed, 1154 deletions(-)
 delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
 delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp
 delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
 delete mode 100644 tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
deleted file mode 100644
index 493bfbf1ff4..00000000000
--- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <vector>
-
-#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_1_1) {
-  int count_rows = 1;
-  int size_rows = 1;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {5};
-    ans = {5};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_5_1) {
-  int count_rows = 5;
-  int size_rows = 1;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {5, 5, 5, 5, 5};
-    ans = {5, 5, 5, 5, 5};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_1_5) {
-  int count_rows = 1;
-  int size_rows = 5;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {5, 5, 5, 5, 5};
-    ans = {5};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_5) {
-  int count_rows = 3;
-  int size_rows = 5;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0};
-    ans = {1, 3, 0};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_3_6) {
-  int count_rows = 3;
-  int size_rows = 6;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9};
-    ans = {3, 4, 2};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_MPI_and_Seq_4_5) {
-  int count_rows = 4;
-  int size_rows = 5;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> ans;
-  std::vector<int> par_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8};
-    ans = {3, 4, 2, 5};
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, ans);
-    ASSERT_EQ(par_min_vec, ans);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_12) {
-  int count_rows = 10;
-  int size_rows = 12;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, par_min_vec);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_15) {
-  int count_rows = 10;
-  int size_rows = 15;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, par_min_vec);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_10_2) {
-  int count_rows = 10;
-  int size_rows = 2;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel.validation(), true);
-  testMpiTaskParallel.pre_processing();
-  testMpiTaskParallel.run();
-  testMpiTaskParallel.post_processing();
-
-  if (world.rank() == 0) {
-    // Create data
-    std::vector<int> ref_min_vec(count_rows, 0);
-
-    // Create TaskData
-    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataSeq->inputs_count.emplace_back(global_mat.size());
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(ref_min_vec.data()));
-    taskDataSeq->outputs_count.emplace_back(ref_min_vec.size());
-
-    // Create Task
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
-    ASSERT_EQ(testMpiTaskSequential.validation(), true);
-    testMpiTaskSequential.pre_processing();
-    testMpiTaskSequential.run();
-    testMpiTaskSequential.post_processing();
-
-    ASSERT_EQ(ref_min_vec, par_min_vec);
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Min_Rand_0_0) {
-  int count_rows = 0;
-  int size_rows = 0;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-    ASSERT_FALSE(testMpiTaskParallel.validation());
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi, Test_Check_valdation) {
-  int count_rows = 10;
-  int size_rows = 10;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(count_rows * size_rows);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-
-    kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
-    ASSERT_FALSE(testMpiTaskParallel.validation());
-  }
-}
\ No newline at end of file
diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp
deleted file mode 100644
index f1eb1dc4e09..00000000000
--- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#pragma once
-
-#include <gtest/gtest.h>
-
-#include <boost/mpi/collectives.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "core/task/include/task.hpp"
-
-namespace kurakin_m_min_values_by_rows_matrix_mpi {
-
-std::vector<int> getRandomVector(int sz);
-
-class TestMPITaskSequential : public ppc::core::Task {
- public:
-  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
-  bool pre_processing() override;
-  bool validation() override;
-  bool run() override;
-  bool post_processing() override;
-
- private:
-  int count_rows{};
-  int size_rows{};
-  std::vector<int> input_;
-  std::vector<int> res;
-};
-
-class TestMPITaskParallel : public ppc::core::Task {
- public:
-  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
-  bool pre_processing() override;
-  bool validation() override;
-  bool run() override;
-  bool post_processing() override;
-
- private:
-  int count_rows{};
-  int size_rows{};
-  int delta_proc{};
-  std::vector<int> input_, local_input_;
-  std::vector<int> res;
-  boost::mpi::communicator world;
-};
-
-}  // namespace kurakin_m_min_values_by_rows_matrix_mpi
\ No newline at end of file
diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
deleted file mode 100644
index a290fa90e1a..00000000000
--- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <boost/mpi/timer.hpp>
-#include <vector>
-
-#include "core/perf/include/perf.hpp"
-#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_pipeline_run) {
-  int count_rows = 100;
-  int size_rows = 400;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = std::vector<int>(count_rows * size_rows, 1);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  auto testMpiTaskParallel =
-      std::make_shared<kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel>(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel->validation(), true);
-  testMpiTaskParallel->pre_processing();
-  testMpiTaskParallel->run();
-  testMpiTaskParallel->post_processing();
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const boost::mpi::timer current_timer;
-  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
-  perfAnalyzer->pipeline_run(perfAttr, perfResults);
-  if (world.rank() == 0) {
-    ppc::core::Perf::print_perf_statistic(perfResults);
-    for (unsigned i = 0; i < par_min_vec.size(); i++) {
-      EXPECT_EQ(1, par_min_vec[0]);
-    }
-  }
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_mpi_perf_test, test_task_run) {
-  int count_rows = 100;
-  int size_rows = 400;
-  boost::mpi::communicator world;
-  std::vector<int> global_mat;
-  std::vector<int> par_min_vec(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-
-  if (world.rank() == 0) {
-    global_mat = std::vector<int>(count_rows * size_rows, 1);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-    taskDataPar->inputs_count.emplace_back(global_mat.size());
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-    taskDataPar->inputs_count.emplace_back(static_cast<size_t>(1));
-    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(par_min_vec.data()));
-    taskDataPar->outputs_count.emplace_back(par_min_vec.size());
-  }
-
-  auto testMpiTaskParallel =
-      std::make_shared<kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel>(taskDataPar);
-  ASSERT_EQ(testMpiTaskParallel->validation(), true);
-  testMpiTaskParallel->pre_processing();
-  testMpiTaskParallel->run();
-  testMpiTaskParallel->post_processing();
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const boost::mpi::timer current_timer;
-  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-  if (world.rank() == 0) {
-    ppc::core::Perf::print_perf_statistic(perfResults);
-    for (unsigned i = 0; i < par_min_vec.size(); i++) {
-      EXPECT_EQ(1, par_min_vec[0]);
-    }
-  }
-}
diff --git a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
deleted file mode 100644
index cc2079ef09e..00000000000
--- a/tasks/mpi/kurakin_m_min_values_by_rows_matrix/src/ops_mpi.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include "mpi/kurakin_m_min_values_by_rows_matrix/include/ops_mpi.hpp"
-
-#include <algorithm>
-#include <functional>
-#include <random>
-#include <thread>
-#include <vector>
-
-using namespace std::chrono_literals;
-
-std::vector<int> kurakin_m_min_values_by_rows_matrix_mpi::getRandomVector(int sz) {
-  std::random_device dev;
-  std::mt19937 gen(dev());
-  std::vector<int> vec(sz);
-  for (int i = 0; i < sz; i++) {
-    vec[i] = gen() % 100;
-  }
-  return vec;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() {
-  internal_order_test();
-
-  count_rows = (int)*taskData->inputs[1];
-  size_rows = (int)*taskData->inputs[2];
-  input_ = std::vector<int>(taskData->inputs_count[0]);
-  auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
-  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
-    input_[i] = tmp_ptr[i];
-  }
-  res = std::vector<int>(count_rows, 0);
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() {
-  internal_order_test();
-
-  return taskData->inputs.size() == 3 && taskData->inputs_count.size() == 3 && taskData->outputs.size() == 1 &&
-         taskData->outputs_count.size() == 1 && *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 &&
-         *taskData->inputs[1] == taskData->outputs_count[0];
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::run() {
-  internal_order_test();
-
-  for (int i = 0; i < count_rows; i++) {
-    res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows);
-  }
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() {
-  internal_order_test();
-
-  for (int i = 0; i < count_rows; i++) {
-    reinterpret_cast<int *>(taskData->outputs[0])[i] = res[i];
-  }
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    count_rows = (int)*taskData->inputs[1];
-    size_rows = (int)*taskData->inputs[2];
-    if (taskData->inputs_count[0] % world.size() == 0) {
-      delta_proc = taskData->inputs_count[0] / world.size();
-    } else {
-      delta_proc = taskData->inputs_count[0] / world.size() + 1;
-    }
-    input_ = std::vector<int>(delta_proc * world.size(), INT_MAX);
-    auto* tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
-    for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
-      input_[i] = tmp_ptr[i];
-    }
-  }
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() {
-  internal_order_test();
-  if (world.rank() == 0) {
-    return taskData->inputs.size() == 3 && taskData->inputs_count.size() == 3 && taskData->outputs.size() == 1 &&
-           taskData->outputs_count.size() == 1 && *taskData->inputs[1] != 0 && *taskData->inputs[2] != 0 &&
-           *taskData->inputs[1] == taskData->outputs_count[0];
-  }
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::run() {
-  internal_order_test();
-
-  broadcast(world, count_rows, 0);
-  broadcast(world, size_rows, 0);
-  broadcast(world, delta_proc, 0);
-
-  local_input_ = std::vector<int>(delta_proc);
-  boost::mpi::scatter(world, input_.data(), local_input_.data(), delta_proc, 0);
-
-  res = std::vector<int>(count_rows, INT_MAX);
-
-  unsigned int last_delta = 0;
-  if (world.rank() == world.size() - 1) {
-    last_delta = local_input_.size() * world.size() - size_rows * count_rows;
-  }
-
-  unsigned int ind = std::min(world.rank() * local_input_.size() / size_rows, static_cast<size_t>(count_rows - 1));
-
-  unsigned int delta = std::min(local_input_.size(), size_rows - world.rank() * local_input_.size() % size_rows);
-  std::vector<int> local_res(count_rows, INT_MAX);
-
-  local_res[ind] = *std::min_element(local_input_.begin(), local_input_.begin() + delta);
-  ++ind;
-
-  unsigned int k = 0;
-  while (local_input_.begin() + delta + k * size_rows < local_input_.end() - last_delta) {
-    local_res[ind] =
-        *std::min_element(local_input_.begin() + delta + k * size_rows,
-                          std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * size_rows));
-    ++k;
-    ++ind;
-  }
-
-  for (unsigned int i = 0; i < res.size(); ++i) {
-    reduce(world, local_res[i], res[i], boost::mpi::minimum<int>(), 0);
-  }
-  return true;
-}
-
-bool kurakin_m_min_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    for (int i = 0; i < count_rows; i++) {
-      reinterpret_cast<int *>(taskData->outputs[0])[i] = res[i];
-    }
-  }
-  return true;
-}
diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
deleted file mode 100644
index 480eccc6ab7..00000000000
--- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/func_tests/main.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <vector>
-
-#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp"
-
-TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min1) {
-  int count_rows;
-  int size_rows;
-
-  // Create data
-  count_rows = 3;
-  size_rows = 5;
-  std::vector<int> global_mat = {1, 5, 3, 7, 9, 3, 4, 6, 7, 9, 2, 4, 2, 5, 0};
-
-  std::vector<int> seq_min_vec(count_rows, 0);
-  std::vector<int> ans = {1, 3, 0};
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-  taskDataSeq->inputs_count.emplace_back(global_mat.size());
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
-  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
-
-  // Create Task
-  kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-  ASSERT_EQ(ans, seq_min_vec);
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min2) {
-  int count_rows;
-  int size_rows;
-
-  // Create data
-  count_rows = 3;
-  size_rows = 6;
-  std::vector<int> global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9};
-
-  std::vector<int> seq_min_vec(count_rows, 0);
-  std::vector<int> ans = {3, 4, 2};
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-  taskDataSeq->inputs_count.emplace_back(global_mat.size());
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
-  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
-
-  // Create Task
-  kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-  ASSERT_EQ(ans, seq_min_vec);
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min3) {
-  int count_rows;
-  int size_rows;
-
-  // Create data
-  count_rows = 4;
-  size_rows = 5;
-
-  std::vector<int> global_mat = {10, 5, 3, 9, 7, 9, 13, 4, 6, 7, 7, 9, 12, 4, 2, 5, 10, 9, 5, 8};
-
-  std::vector<int> seq_min_vec(count_rows, 0);
-  std::vector<int> ans = {3, 4, 2, 5};
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-  taskDataSeq->inputs_count.emplace_back(global_mat.size());
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
-  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
-
-  // Create Task
-  kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-  ASSERT_EQ(ans, seq_min_vec);
-}
-
-TEST(kurakin_m_min_values_by_rows_matrix_seq, Test_Min_null) {
-  int count_rows;
-  int size_rows;
-  // Create data
-  count_rows = 0;
-  size_rows = 0;
-  std::vector<int> global_mat(count_rows * size_rows);
-  std::vector<int> seq_min_vec(count_rows, 0);
-  std::vector<int> ans(count_rows, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-  taskDataSeq->inputs_count.emplace_back(global_mat.size());
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
-  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
-  // Create Task
-  kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-
-  ASSERT_EQ(seq_min_vec, ans);
-}
\ No newline at end of file
diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp
deleted file mode 100644
index 6c4d04360c4..00000000000
--- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "core/task/include/task.hpp"
-
-namespace kurakin_m_min_values_by_rows_matrix_seq {
-
-class TestTaskSequential : public ppc::core::Task {
- public:
-  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
-  bool pre_processing() override;
-  bool validation() override;
-  bool run() override;
-  bool post_processing() override;
-
- private:
-  int count_rows{};
-  int size_rows{};
-  std::vector<int> input_;
-  std::vector<int> res;
-};
-
-}  // namespace kurakin_m_min_values_by_rows_matrix_seq
\ No newline at end of file
diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
deleted file mode 100644
index b07bae29d92..00000000000
--- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/perf_tests/main.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2023 Nesterov Alexander
-#include <gtest/gtest.h>
-
-#include <vector>
-
-#include "core/perf/include/perf.hpp"
-#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp"
-
-TEST(kurakin_m_min_values_by_rows_matrix_seq, test_pipeline_run) {
-  int count_rows;
-  int size_rows;
-
-  // Create data
-  count_rows = 100;
-  size_rows = 400;
-  std::vector<int> global_mat(count_rows * size_rows, 1);
-  std::vector<int> seq_min_vec(count_rows, 0);
-
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_mat.data()));
-  taskDataSeq->inputs_count.emplace_back(global_mat.size());
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(&size_rows));
-  taskDataSeq->inputs_count.emplace_back(static_cast<size_t>(1));
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(seq_min_vec.data()));
-  taskDataSeq->outputs_count.emplace_back(seq_min_vec.size());
-
-  // Create Task
-  auto testTaskSequential = std::make_shared<kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential>(taskDataSeq);
-
-  // Create Perf attributes
-  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
-  perfAttr->num_running = 10;
-  const auto t0 = std::chrono::high_resolution_clock::now();
-  perfAttr->current_timer = [&] {
-    auto current_time_point = std::chrono::high_resolution_clock::now();
-    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
-    return static_cast<double>(duration) * 1e-9;
-  };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared<ppc::core::PerfResults>();
-
-  // Create Perf analyzer
analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (size_t i = 0; i < seq_min_vec.size(); i++) { - EXPECT_EQ(1, seq_min_vec[0]); - } -} - -TEST(kurakin_m_min_values_by_rows_matrix_seq, test_task_run) { - int count_rows; - int size_rows; - - // Create data - count_rows = 100; - size_rows = 400; - std::vector global_mat(count_rows * size_rows, 1); - std::vector seq_min_vec(count_rows, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); - taskDataSeq->inputs_count.emplace_back(global_mat.size()); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&size_rows)); - taskDataSeq->inputs_count.emplace_back(static_cast(1)); - taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_min_vec.data())); - taskDataSeq->outputs_count.emplace_back(seq_min_vec.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - for (unsigned i = 0; i < seq_min_vec.size(); i++) { - EXPECT_EQ(1, seq_min_vec[0]); - } -} diff --git a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp deleted file mode 100644 index 5efb2be011d..00000000000 --- a/tasks/seq/kurakin_m_min_values_by_rows_matrix/src/ops_seq.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2024 Nesterov Alexander -#include "seq/kurakin_m_min_values_by_rows_matrix/include/ops_seq.hpp" - -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return *taskData->inputs[1] == taskData->outputs_count[0]; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::run() { - internal_order_test(); - // Init value for output - count_rows = (int)*taskData->inputs[1]; - size_rows = (int)*taskData->inputs[2]; - res = std::vector(count_rows, 0); - - for (int i = 0; i < count_rows; i++) { - res[i] = *std::min_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); - } - return true; -} - -bool kurakin_m_min_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { - internal_order_test(); - for (int i = 0; i < count_rows; i++) { - 
reinterpret_cast(taskData->outputs[0])[i] = res[i]; - } - return true; -} From cdc697aa88f53d70bad6c44304090a6283385901 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Wed, 6 Nov 2024 09:19:28 +0800 Subject: [PATCH 117/155] =?UTF-8?q?Revert=20"=D0=92=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D1=87=D0=B0=D0=B5=D0=B2=20=D0=A1=D0=B5=D1=80=D0=B0=D1=84=D0=B8?= =?UTF-8?q?=D0=BC=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2027:=20=D0=9F=D0=BE=D0=B4?= =?UTF-8?q?=D1=81=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20?= =?UTF-8?q?=D0=BD=D0=B5=D1=81=D0=BE=D0=B2=D0=BF=D0=B0=D0=B4=D0=B0=D1=8E?= =?UTF-8?q?=D1=89=D0=B8=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82=D1=80=D0=BE?= =?UTF-8?q?=D0=BA"=20(#229)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#92 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11695287544/job/32570456583 image --- .../func_tests/main.cpp | 229 ------------------ .../include/ops_mpi.hpp | 45 ---- .../perf_tests/main.cpp | 91 ------- .../src/ops_mpi.cpp | 126 ---------- .../func_tests/main.cpp | 184 -------------- .../include/ops_seq.hpp | 24 -- .../perf_tests/main.cpp | 81 ------- .../src/ops_seq.cpp | 52 ---- 8 files changed, 832 deletions(-) delete mode 100644 tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp delete mode 100644 tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp delete mode 100644 tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp delete mode 100644 tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp delete mode 100644 tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp delete mode 100644 tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp delete mode 100644 tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp delete mode 100644 tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp diff --git a/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp deleted file mode 100644 index c9d9adcd7e9..00000000000 --- a/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp +++ /dev/null @@ -1,229 +0,0 @@ -#include - -#include -#include -#include -#include - -#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" - -namespace volochaev_s_count_characters_27_mpi { - -std::string get_random_string(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - - std::string vec(sz, ' '); - for (int i = 0; i < sz; i++) { - vec[i] += gen() % 256; - } - return vec; -} - -} // namespace volochaev_s_count_characters_27_mpi - -TEST(volochaev_s_count_characters_27_MPI, Test_0) { - boost::mpi::communicator world; - std::vector global_vec(1, volochaev_s_count_characters_27_mpi::get_random_string(20)); - std::vector global_diff(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - // Create data - std::vector reference_diff(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); - taskDataSeq->outputs_count.emplace_back(reference_diff.size()); - - // Create Task - volochaev_s_count_characters_27_mpi::Lab1_27_seq 
testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), false); - } -} - -TEST(volochaev_s_count_characters_27_MPI, Test_1) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_diff(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int size_str1 = 240; - const int size_str2 = 120; - global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str1), - volochaev_s_count_characters_27_mpi::get_random_string(size_str2)}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); - taskDataPar->outputs_count.emplace_back(global_diff.size()); - } - - volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_diff(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); - taskDataSeq->outputs_count.emplace_back(reference_diff.size()); - - // Create Task - volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_diff[0], global_diff[0]); - } -} - -TEST(volochaev_s_count_characters_27_MPI, Test_2) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_diff(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int string_sz = 120; - std::string s = volochaev_s_count_characters_27_mpi::get_random_string(string_sz); - global_vec = {s, s}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); - taskDataPar->outputs_count.emplace_back(global_diff.size()); - } - - volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_diff(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); - taskDataSeq->outputs_count.emplace_back(reference_diff.size()); - - // Create Task - volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_diff[0], global_diff[0]); - } -} - 
-TEST(volochaev_s_count_characters_27_MPI, Test_3) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int size_str1 = 240; - const int size_str2 = 120; - global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str2), - volochaev_s_count_characters_27_mpi::get_random_string(size_str1)}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(volochaev_s_count_characters_27_MPI, Test_4) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int size_str = 120; - global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str), - volochaev_s_count_characters_27_mpi::get_random_string(size_str)}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} \ No newline at end of file diff --git 
a/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp b/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp deleted file mode 100644 index 582a1e3724e..00000000000 --- a/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp +++ /dev/null @@ -1,45 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace volochaev_s_count_characters_27_mpi { - -class Lab1_27_seq : public ppc::core::Task { - public: - explicit Lab1_27_seq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int res{}; -}; - -class Lab1_27_mpi : public ppc::core::Task { - public: - explicit Lab1_27_mpi(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_, local_input_; - int res{}; - int del{}; - boost::mpi::communicator world; -}; - -} // namespace volochaev_s_count_characters_27_mpi \ No newline at end of file diff --git a/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp deleted file mode 100644 index bfe2e510e3f..00000000000 --- a/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp +++ /dev/null @@ -1,91 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" - -TEST(volochaev_s_count_characters_27_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_string; - if (world.rank() == 0) { - count_size_string = 200000000; - std::string s(count_size_string, ' '); - global_vec = std::vector(2, s); - - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(2, global_sum[0]); - } -} - -TEST(volochaev_s_count_characters_27_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int string_size; - if (world.rank() == 0) { - string_size = 200000000; - std::string s(string_size, ' '); - global_vec = std::vector(2, s); - 
taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, global_sum[0]); - } -} diff --git a/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp b/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp deleted file mode 100644 index 90d1a84e6a7..00000000000 --- a/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp +++ /dev/null @@ -1,126 +0,0 @@ -#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::pre_processing() { - internal_order_test(); - // Init vectors - auto tmp1 = reinterpret_cast(taskData->inputs[0])[0]; - auto tmp2 = reinterpret_cast(taskData->inputs[0])[1]; - - input_ = std::vector>(std::min(tmp1.size(), tmp2.size())); - - for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); i++) { - input_[i].first = tmp1[i]; - input_[i].second = tmp2[i]; - } - - // Init value for output - res = abs(static_cast(tmp1.size()) - static_cast(tmp2.size())); - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::validation() { - internal_order_test(); - // Check count elements of output - return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::run() { - internal_order_test(); - for (auto [x, y] : input_) { - if (x != y) { - res += 2; - } - } - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::post_processing() { - internal_order_test(); - *reinterpret_cast(taskData->outputs[0]) = res; - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::pre_processing() { - internal_order_test(); - - // Init value for output - res = 0; - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::validation() { - internal_order_test(); - - if (world.rank() == 0) { - // Check count elements of output - return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; - } - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::run() { - internal_order_test(); - - std::string tmp1; - std::string tmp2; - int delta = 0; - if (world.rank() == 0) { - tmp1 = reinterpret_cast(taskData->inputs[0])[0]; - tmp2 = reinterpret_cast(taskData->inputs[0])[1]; - - del = abs(static_cast(tmp1.size()) - static_cast(tmp2.size())); - - delta = static_cast(std::min(tmp1.size(), tmp2.size())) / world.size(); - if (taskData->inputs_count[0] % world.size() > 0u) ++delta; - } - - broadcast(world, delta, 0); - - 
if (world.rank() == 0) { - // Init vectors - input_ = std::vector>(world.size() * delta); - - for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); ++i) { - input_[i].first = tmp1[i]; - input_[i].second = tmp2[i]; - } - - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * delta, delta); - } - } - - local_input_ = std::vector>(delta); - if (world.rank() == 0) { - local_input_ = std::vector>(input_.begin(), input_.begin() + delta); - } else { - world.recv(0, 0, local_input_.data(), delta); - } - - int res1 = 0; - for (auto [x, y] : local_input_) { - if (x != y) { - res1 += 2; - } - } - reduce(world, res1, res, std::plus(), 0); - return true; -} - -bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - *reinterpret_cast(taskData->outputs[0]) = res + del; - } - return true; -} diff --git a/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp deleted file mode 100644 index 7c1154192eb..00000000000 --- a/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp +++ /dev/null @@ -1,184 +0,0 @@ -#include - -#include -#include - -#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" - -namespace volochaev_s_count_characters_27_seq { - -std::string get_random_string(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - - std::string vec(sz, ' '); - for (int i = 0; i < sz; i++) { - vec[i] += gen() % 256; - } - return vec; -} -} // namespace volochaev_s_count_characters_27_seq - -TEST(volochaev_s_count_characters_27_seq, Test_0) { - // Create data - std::vector in = {volochaev_s_count_characters_27_seq::get_random_string(20)}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(volochaev_s_count_characters_27_seq, Test_1) { - // Create data - std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); - std::vector in(2, s); - std::vector out(1, 0); - - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, Test_2) { - // Create data - std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); - std::string s1 = s; - - s1.back() = static_cast((static_cast(s1.back()) + 1) % 256); - - std::vector in = {s, s1}; - std::vector out(1, 0); - int ans = 2; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - 
taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, Test_3) { - // Create data - - std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); - std::string s1 = s.substr(0, 2); - - std::vector in = {s, s1}; - std::vector out(1, 0); - int ans = 4; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, Test_4) { - // Create data - std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); - std::string s1 = s.substr(0, 2); - - std::vector in = {s1, s}; - std::vector out(1, 0); - int ans = 4; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, Test_5) { - // Create data - std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); - std::vector in(2, s); - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, Test_6) { - // Create data - std::string s = volochaev_s_count_characters_27_seq::get_random_string(7); - std::vector in(2, s); - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - 
volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp b/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp deleted file mode 100644 index 7279e6fa3a8..00000000000 --- a/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace volochaev_s_count_characters_27_seq { - -class Lab1_27 : public ppc::core::Task { - public: - explicit Lab1_27(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int sz1, sz2; - int res{}; -}; - -} // namespace volochaev_s_count_characters_27_seq \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp deleted file mode 100644 index 25c7995c367..00000000000 --- a/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp +++ /dev/null @@ -1,81 +0,0 @@ -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" - -TEST(volochaev_s_count_characters_27_seq, test_pipeline_run) { - // Create data - std::string s(20000000, ' '); - std::vector in(2, s); - std::vector out(1, 0); - - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ans, out[0]); -} - -TEST(volochaev_s_count_characters_27_seq, test_task_run) { - // Create data - std::string s(20000000, ' '); - std::vector in(2, s); - std::vector out(1, 0); - int ans = 0; - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - 
perfAttr->current_timer = [&] {
-    auto current_time_point = std::chrono::high_resolution_clock::now();
-    auto duration = std::chrono::duration_cast(current_time_point - t0).count();
-    return static_cast(duration) * 1e-9;
-  };
-
-  // Create and init perf results
-  auto perfResults = std::make_shared();
-
-  // Create Perf analyzer
-  auto perfAnalyzer = std::make_shared(testTaskSequential);
-  perfAnalyzer->task_run(perfAttr, perfResults);
-  ppc::core::Perf::print_perf_statistic(perfResults);
-  ASSERT_EQ(ans, out[0]);
-}
\ No newline at end of file
diff --git a/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp b/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp
deleted file mode 100644
index 498068ea5f9..00000000000
--- a/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp"
-
-#include
-#include
-#include
-
-using namespace std::chrono_literals;
-
-bool volochaev_s_count_characters_27_seq::Lab1_27::pre_processing() {
-  internal_order_test();
-  // Init value for input and output
-  std::string input1_ = reinterpret_cast(taskData->inputs[0])[0];
-  std::string input2_ = reinterpret_cast(taskData->inputs[0])[1];
-
-  input_ = std::vector>(std::min(input1_.size(), input2_.size()));
-
-  for (size_t i = 0; i < std::min(input1_.size(), input2_.size()); ++i) {
-    input_[i].first = input1_[i];
-    input_[i].second = input2_[i];
-  }
-
-  sz1 = input1_.size();
-  sz2 = input2_.size();
-  res = 0;
-  return true;
-}
-
-bool volochaev_s_count_characters_27_seq::Lab1_27::validation() {
-  internal_order_test();
-  // Check count elements of output
-  return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1;
-}
-
-bool volochaev_s_count_characters_27_seq::Lab1_27::run() {
-  internal_order_test();
-
-  res = abs(sz1 - sz2);
-
-  for (auto [x, y] : input_) {
-    if (x != y) {
-      res += 2;
-    }
-  }
-
-  return true;
-}
-
-bool volochaev_s_count_characters_27_seq::Lab1_27::post_processing() {
-  internal_order_test();
-  *reinterpret_cast(taskData->outputs[0]) = res;
-  return true;
-}

From b33a3afda4b589322c23c5679cf64d7678529d0c Mon Sep 17 00:00:00 2001
From: artich02 <117100185+artich02@users.noreply.github.com>
Date: Wed, 6 Nov 2024 18:29:44 +0300
Subject: [PATCH 118/155] Tyshkevich Artem. Task 1. Variant 6. Finding the
 number of order violations between adjacent elements of a vector. (#150)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the sequential implementation the vector is initialized and then traversed
once with an element-by-element comparison; a vector of size zero is treated
as an error at the validation stage. Whenever the current element is greater
than the next one, the violation counter is incremented.

In the parallel (MPI) version the vector is split into parts, and each process
works on its part in a separate buffer. The root process distributes the data,
and every non-root process counts violations locally. To account correctly for
violations on the boundaries of the subarrays, adjacent chunks overlap by one
element. When the computation finishes, the partial results are gathered and
summed at the root process.
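A minimal, self-contained sketch of the boundary rule described above (not the
task code itself; the chunk size and helper names are illustrative). Every
chunk except the last one carries one extra trailing element, so the pair of
neighbours that straddles a chunk boundary is examined exactly once. The
sample vector is the 12-element input used in the functional tests below,
which contains 4 violations:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Count pairs (v[i-1], v[i]) with v[i-1] > v[i] inside one contiguous span.
    static int count_violations(const std::vector<int>& v, std::size_t begin, std::size_t end) {
      int count = 0;
      for (std::size_t i = begin + 1; i < end; ++i)
        if (v[i - 1] > v[i]) ++count;
      return count;
    }

    int main() {
      const std::vector<int> data = {1, 2, 4, 6, 1, 8, 3, 0, 5, 9, 4, 4};
      const std::size_t chunk = 4;  // hypothetical per-process share

      int total = 0;
      for (std::size_t begin = 0; begin < data.size(); begin += chunk) {
        // Every chunk but the last gets one extra element to cover the boundary pair.
        std::size_t end = begin + chunk < data.size() ? begin + chunk + 1 : data.size();
        total += count_violations(data, begin, end);
      }

      // The chunked count must match a single sequential pass.
      assert(total == count_violations(data, 0, data.size()));
      return 0;
    }

In the ops_mpi.cpp added by this patch, the root process achieves the same
effect by sending chunkSize + 1 elements to every worker except the last one.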
---
 .../func_tests/main.cpp | 177 ++++++++++++++++++
 .../include/ops_mpi.hpp | 44 +++++
 .../perf_tests/main.cpp | 97 ++++++++++
 .../src/ops_mpi.cpp | 123 ++++++++++++
 .../func_tests/main.cpp | 126 +++++++++++++
 .../include/ops_seq.hpp | 25 +++
 .../perf_tests/main.cpp | 82 ++++++++
 .../src/ops_seq.cpp | 39 ++++
 8 files changed, 713 insertions(+)
 create mode 100644 tasks/mpi/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp
 create mode 100644 tasks/mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp
 create mode 100644 tasks/mpi/tyshkevich_a_num_of_orderly_violations/src/ops_mpi.cpp
 create mode 100644 tasks/seq/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp
 create mode 100644 tasks/seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp
 create mode 100644 tasks/seq/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp
 create mode 100644 tasks/seq/tyshkevich_a_num_of_orderly_violations/src/ops_seq.cpp

diff --git a/tasks/mpi/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp
new file mode 100644
index 00000000000..3eafedf45df
--- /dev/null
+++ b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp
@@ -0,0 +1,177 @@
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp"
+
+namespace tyshkevich_a_num_of_orderly_violations_mpi {
+
+std::vector getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+}  // namespace tyshkevich_a_num_of_orderly_violations_mpi
+
+std::string VecToStrTY(std::vector &v) {
+  std::ostringstream oss;
+
+  if (!v.empty()) {
+    std::copy(v.begin(), v.end() - 1, std::ostream_iterator(oss, ","));
+    oss << v.back();
+  }
+  return oss.str();
+}
+
+TEST(tyshkevich_a_num_of_orderly_violations_mpi_ftest, Test_Max_10) {
+  int size = 10;
+
+  // Create data
+  std::vector global_vec(size);
+  std::vector result(1, 0);
+
+  boost::mpi::communicator world;
+
+  // Create TaskData
+  std::shared_ptr taskDataPar = std::make_shared();
+
+  taskDataPar->inputs_count.emplace_back(size);
+  if (world.rank() == 0) {
+    global_vec = tyshkevich_a_num_of_orderly_violations_mpi::getRandomVector(size);
+    taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataPar->outputs.emplace_back(reinterpret_cast(result.data()));
+    taskDataPar->outputs_count.emplace_back(result.size());
+  }
+
+  // Create Task
+  tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel testTaskParallel(taskDataPar);
+  ASSERT_EQ(testTaskParallel.validation(), true);
+  testTaskParallel.pre_processing();
+  testTaskParallel.run();
+  testTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector local_count(1, 0);
+
+    // Create TaskData
+    std::shared_ptr taskDataSeq = std::make_shared();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(size);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data()));
+
taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(result, local_count) << VecToStrTY(global_vec) << ' ' << size << ' ' << world.size() << std::endl; + } +} + +TEST(tyshkevich_a_num_of_orderly_violations_mpi_ftest, Test_Max_20) { + int size = 20; + + // Create data + std::vector global_vec(size); + std::vector result(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(size); + if (world.rank() == 0) { + global_vec = tyshkevich_a_num_of_orderly_violations_mpi::getRandomVector(size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + + // Create Task + tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(result, local_count) << VecToStrTY(global_vec) << ' ' << size << ' ' << world.size() << std::endl; + } +} + +TEST(tyshkevich_a_num_of_orderly_violations_mpi_ftest, Test_Max_50) { + int size = 50; + + // Create data + std::vector global_vec(size); + std::vector result(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(size); + if (world.rank() == 0) { + global_vec = tyshkevich_a_num_of_orderly_violations_mpi::getRandomVector(size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + + // Create Task + tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector local_count(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(local_count.data())); + taskDataSeq->outputs_count.emplace_back(local_count.size()); + + // Create Task + 
tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(result, local_count) << VecToStrTY(global_vec) << ' ' << size << ' ' << world.size() << std::endl; + } +} diff --git a/tasks/mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp new file mode 100644 index 00000000000..de8341ed5a8 --- /dev/null +++ b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace tyshkevich_a_num_of_orderly_violations_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int size = 0; + std::vector input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int size = 0; + std::vector input_, local_input_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace tyshkevich_a_num_of_orderly_violations_mpi diff --git a/tasks/mpi/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..d74676e4873 --- /dev/null +++ b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,97 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp" + +TEST(tyshkevich_a_num_of_orderly_violations_mpi_ptest, test_pipeline_run) { + int size = 9999; + + // Create data + std::vector global_vec(size, 1); + std::vector result(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(size); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + + // Create Task + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((uint32_t)(1), taskDataPar->outputs_count[0]); + } +} + 
+TEST(tyshkevich_a_num_of_orderly_violations_mpi_ptest, test_task_run) { + int size = 9999; + + // Create data + std::vector global_vec(size, 1); + std::vector result(1, 0); + + boost::mpi::communicator world; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(size); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + + // Create Task + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((uint32_t)(1), taskDataPar->outputs_count[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/tyshkevich_a_num_of_orderly_violations/src/ops_mpi.cpp b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/src/ops_mpi.cpp new file mode 100644 index 00000000000..796123ea045 --- /dev/null +++ b/tasks/mpi/tyshkevich_a_num_of_orderly_violations/src/ops_mpi.cpp @@ -0,0 +1,123 @@ +#include "mpi/tyshkevich_a_num_of_orderly_violations/include/ops_mpi.hpp" + +#include + +using namespace std::chrono_literals; + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + // Init vectors + size = taskData->inputs_count[0]; + + input_ = std::vector(size); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < size; i++) { + input_[i] = tmp_ptr[i]; + } + // Init values for output + res = std::vector(1, 0); + + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count of elements in I/O + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (int i = 1; i < size; i++) { + if (input_[i - 1] > input_[i]) res[0]++; + } + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + // Init vectors + size = taskData->inputs_count[0]; + + if (world.rank() == 0) { + input_ = std::vector(size); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < size; i++) { + input_[i] = tmp_ptr[i]; + } + // Init values for output + res = std::vector(1, 0); + } + + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool 
tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel::run() { + internal_order_test(); + if (world.size() == 1) { + for (int i = 1; i < size; i++) { + if (input_[i - 1] > input_[i]) res[0]++; + } + return true; + } + + int world_size = world.size(); + + if (world.rank() > size) { + return true; + } + if (world_size > size + 1) world_size = size + 1; + + int chunkSize = size / (world_size - 1); + int lastChunkSize = size - chunkSize * (world_size - 2); + + if (world.rank() == 0) { + for (int i = 0; i < world_size - 2; i++) { + world.send(i + 1, 0, input_.data() + i * chunkSize, chunkSize + 1); + } + world.send(world_size - 1, 0, input_.data() + (world_size - 2) * chunkSize, lastChunkSize); + + int tempDef; + for (int i = 0; i < world_size - 1; i++) { + world.recv(i + 1, 1, &tempDef, 1); + res[0] += tempDef; + } + } else { + int localChunk = chunkSize + 1; + if (world_size - 1 == world.rank()) localChunk = lastChunkSize; + std::vector chunk(localChunk); + int counter = 0; + world.recv(0, 0, chunk.data(), localChunk); + + for (int i = 1; i < localChunk; i++) { + if (chunk[i - 1] > chunk[i]) counter++; + } + world.send(0, 1, &counter, 1); + } + + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + } + return true; +} diff --git a/tasks/seq/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/seq/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..c3a6e05fabd --- /dev/null +++ b/tasks/seq/tyshkevich_a_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,126 @@ +#include + +#include + +#include "seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp" + +TEST(tyshkevich_a_num_of_orderly_violations_seq_ftest, Test_10) { + int size = 10; + + // Create data + std::vector in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + std::vector out(1, 0); + int solution = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(solution, out[0]); +} + +TEST(tyshkevich_a_num_of_orderly_violations_seq_ftest, Test_1) { + int size = 1; + + // Create data + std::vector in{1}; + std::vector out(1, 0); + int solution = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(solution, out[0]); +} + +TEST(tyshkevich_a_num_of_orderly_violations_seq_ftest, Test_12) { + int size = 12; + + // Create data + std::vector in{1, 
2, 4, 6, 1, 8, 3, 0, 5, 9, 4, 4}; + std::vector out(1, 0); + int solution = 4; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(solution, out[0]); +} + +TEST(tyshkevich_a_num_of_orderly_violations_seq_ftest, Test_20) { + int size = 20; + + // Create data + std::vector in{1, 2, 4, 6, 1, 8, 3, 0, 5, 9, 4, 2, 4, 6, 1, 8, 3, 4, 5, 7}; + std::vector out(1, 0); + int solution = 7; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(solution, out[0]); +} + +TEST(tyshkevich_a_num_of_orderly_violations_seq_ftest, Test_50) { + int size = 50; + + // Create data + std::vector in{1, 2, 4, 6, 1, 8, 3, 0, 5, 9, 4, 2, 4, 6, 1, 8, 3, 4, 5, 7, 4, 5, 6, 7, 1, + 2, 5, 4, 6, 2, 4, 6, 2, 1, 6, 8, 4, 5, 6, 7, 8, 9, 3, 6, 7, 8, 2, 3, 2, 3}; + std::vector out(1, 0); + int solution = 17; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(solution, out[0]); +} diff --git a/tasks/seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp b/tasks/seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp new file mode 100644 index 00000000000..57b4397d158 --- /dev/null +++ b/tasks/seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace tyshkevich_a_num_of_orderly_violations_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int size = 0; + std::vector input_; + std::vector res; + std::string ops; +}; + +} // namespace tyshkevich_a_num_of_orderly_violations_seq \ No newline at end of file diff --git a/tasks/seq/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/seq/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp new file 
mode 100644
index 00000000000..9842773ab44
--- /dev/null
+++ b/tasks/seq/tyshkevich_a_num_of_orderly_violations/perf_tests/main.cpp
@@ -0,0 +1,82 @@
+#include
+
+#include
+
+#include "core/perf/include/perf.hpp"
+#include "seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp"
+
+TEST(tyshkevich_a_num_of_orderly_violations_seq_perftest, test_pipeline_run) {
+  int size = 9999;
+
+  // Create data
+  std::vector in(size, 0);
+  std::vector out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(size);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast(current_time_point - t0).count();
+    return static_cast(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ((uint32_t)(1), taskDataSeq->outputs_count.back());
+}
+
+TEST(tyshkevich_a_num_of_orderly_violations_seq_perftest, test_task_run) {
+  int size = 9999;
+
+  // Create data
+  std::vector in(size, 0);
+  std::vector out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr taskDataSeq = std::make_shared();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data()));
+  taskDataSeq->inputs_count.emplace_back(size);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast(current_time_point - t0).count();
+    return static_cast(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ((uint32_t)(1), taskDataSeq->outputs_count.back());
+}
\ No newline at end of file
diff --git a/tasks/seq/tyshkevich_a_num_of_orderly_violations/src/ops_seq.cpp b/tasks/seq/tyshkevich_a_num_of_orderly_violations/src/ops_seq.cpp
new file mode 100644
index 00000000000..75e8146ed62
--- /dev/null
+++ b/tasks/seq/tyshkevich_a_num_of_orderly_violations/src/ops_seq.cpp
@@ -0,0 +1,39 @@
+#include "seq/tyshkevich_a_num_of_orderly_violations/include/ops_seq.hpp"
+
+using namespace std::chrono_literals;
+
+bool tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+
+  // Init vectors
+  size = taskData->inputs_count[0];
+
+  input_ = std::vector(size);
+  int *tmp_ptr = reinterpret_cast(taskData->inputs[0]);
+
for (int i = 0; i < size; i++) { + input_[i] = tmp_ptr[i]; + } + // Init values for output + res = std::vector(1, 0); + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count of elements in I/O + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential::run() { + internal_order_test(); + for (int i = 1; i < size; i++) { + if (input_[i - 1] > input_[i]) res[0]++; + } + return true; +} + +bool tyshkevich_a_num_of_orderly_violations_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + return true; +} \ No newline at end of file From 5ec293f06846a80502ec46b1db9ff1b4ca6a1b60 Mon Sep 17 00:00:00 2001 From: ChastovSlava <113721053+ChastovSlava@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:32:01 +0300 Subject: [PATCH 119/155] =?UTF-8?q?=D0=A7=D0=B0=D1=81=D1=82=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=92=D1=8F=D1=87=D0=B5=D1=81=D0=BB=D0=B0=D0=B2.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=2024.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D1=91?= =?UTF-8?q?=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=81=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA=D0=B5.=20(#160?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This task counts the number of words in a line of text and provides two implementations: a parallel one using MPI (MS-MPI) and a sequential one (seq). The goal is to define where words begin and end and to count how many of them the line contains. In the parallel MPI version the string is partitioned across several processes, each of which handles its own part of the data concurrently, which speeds up work on large volumes of text. In the sequential version the count is performed in a single pass, without splitting the work across threads or processes. The task thus demonstrates how such a computation can be accelerated through parallel data processing where possible, and verifies the correctness of both implementations with tests.
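Editor's note: the scheme just described — split the flattened character buffer into equal blocks, let every rank count the spaces in its own block, and combine the partial counts with a reduce — can be sketched as a minimal, self-contained Boost.MPI program. Everything below is an illustrative sketch by this editor, not code from the patch; all names are hypothetical.

// Minimal sketch of the block-split + reduce word count described above.
// Assumes Boost.MPI; compile with something like:
//   mpic++ sketch.cpp -lboost_mpi -lboost_serialization
#include <boost/mpi.hpp>

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::string text;
  unsigned int block = 0;
  if (world.rank() == 0) {
    text = "This is a proposal to evaluate the performance";
    block = static_cast<unsigned int>(text.size()) / world.size();
  }
  boost::mpi::broadcast(world, block, 0);

  // Root keeps block 0 and ships one block to every other rank.
  std::vector<char> local(block);
  if (world.rank() == 0) {
    for (int proc = 1; proc < world.size(); ++proc) {
      world.send(proc, 0, text.data() + proc * block, block);
    }
    std::copy(text.begin(), text.begin() + block, local.begin());
  } else {
    world.recv(0, 0, local.data(), block);
  }

  // Each rank counts spaces in its own block; reduce sums the partial counts on rank 0.
  int local_spaces = static_cast<int>(std::count(local.begin(), local.end(), ' '));
  int spaces = 0;
  boost::mpi::reduce(world, local_spaces, spaces, std::plus<>(), 0);

  if (world.rank() == 0) {
    // Words are taken as spaces + 1, matching the convention used in this patch.
    // Note: the text.size() % world.size() tail characters are ignored here.
    std::cout << "words: " << spaces + 1 << std::endl;
  }
  return 0;
}

Two properties of this scheme are also visible in the patch below: any remainder after the equal split is never scanned, and counting words as spaces + 1 over-counts when words are separated by runs of spaces. The seq implementation in this same patch instead tracks word boundaries, so the two conventions agree only on single-space-separated text.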
--- .../func_tests/main.cpp | 330 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 +++ .../perf_tests/main.cpp | 84 +++++ .../src/ops_mpi.cpp | 86 +++++ .../func_tests/main.cpp | 185 ++++++++++ .../include/ops_seq.hpp | 26 ++ .../perf_tests/main.cpp | 93 +++++ .../src/ops_seq.cpp | 46 +++ 8 files changed, 897 insertions(+) create mode 100644 tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp create mode 100644 tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp create mode 100644 tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp create mode 100644 tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp create mode 100644 tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp create mode 100644 tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp create mode 100644 tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp create mode 100644 tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp diff --git a/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp b/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp new file mode 100644 index 00000000000..d96d0eb7ebc --- /dev/null +++ b/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp @@ -0,0 +1,330 @@ +// Copyright 2024 Chastov Vyacheslav +#include + +#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" + +std::vector createTestInput(int n) { + std::vector wordCountInput; + std::string testString = "This is a proposal to evaluate the performance of a word counting algorithm via MPI. "; + for (int i = 0; i < n; i++) { + for (unsigned long int j = 0; j < testString.length(); j++) { + wordCountInput.push_back(testString[j]); + } + } + return wordCountInput; +} + +// Test to check the behavior of the MPI word counting function with an empty string +TEST(chastov_v_count_words_in_line_mpi, empty_string) { + boost::mpi::communicator world; + std::vector input = {}; + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_FALSE(testTaskParallel.validation()); + } +} + +// Test to verify the MPI word counting function with a single word input ("hello") +TEST(chastov_v_count_words_in_line_mpi, words_1) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "hello"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + 
std::shared_ptr taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} + +// Test to verify the MPI word counting function with an input string containing four words ("My name is Slava") +TEST(chastov_v_count_words_in_line_mpi, words_4) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "My name is Slava"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + std::shared_ptr taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} + +// Test to verify the MPI word counting function with an input string that generates 300 words (20 copies of a 15-word sentence) +TEST(chastov_v_count_words_in_line_mpi, words_300) { + boost::mpi::communicator world; + std::vector input = createTestInput(20); + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + std::shared_ptr
taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} + +// Test to verify the MPI word counting function with an input string that generates 1500 words +TEST(chastov_v_count_words_in_line_mpi, words_1500) { + boost::mpi::communicator world; + std::vector input = createTestInput(100); + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + std::shared_ptr taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} + +// Test to verify the MPI word counting function with an input string that generates 7500 words +TEST(chastov_v_count_words_in_line_mpi, words_7500) { + boost::mpi::communicator world; + std::vector input = createTestInput(500); + std::vector wordsFound(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + std::shared_ptr taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + 
taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} + +// Tests word counting on a short string passed as an explicit character array (four single-space-separated words) +TEST(chastov_v_count_words_in_line_mpi, multiple_spaces) { + boost::mpi::communicator world; + std::vector input = {'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't'}; + std::vector wordsFound(1, 0); + auto taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + auto testTaskParallel = std::make_shared(taskDataPar); + ASSERT_TRUE(testTaskParallel->validation()); + testTaskParallel->pre_processing(); + testTaskParallel->run(); + testTaskParallel->post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(wordsFound[0], 4); + } +} + +// Checks the word count for a string whose words are separated by multiple consecutive spaces +TEST(chastov_v_count_words_in_line_mpi, multiple_consecutive_spaces) { + boost::mpi::communicator world; + std::vector input; + std::string testString = "Hello   world   MPI"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskDataPar->outputs_count.emplace_back(wordsFound.size()); + } + + chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); + ASSERT_TRUE(testTaskParallel.validation()); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector referenceWordFound(1, 0); + + // Create TaskData + std::shared_ptr taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskDataSequential->inputs_count.emplace_back(input.size()); + taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); + taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); + + // Create Task + chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(wordsFound[0], referenceWordFound[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp b/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp new file mode 100644 index 00000000000..c075b62d385 --- /dev/null +++ 
b/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2024 Chastov Vyacheslav +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace chastov_v_count_words_in_line_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int wordsFound{}; + int spacesFound{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int localSpaceFound{}; + int wordsFound{}; + int spacesFound{}; + boost::mpi::communicator world; +}; + +} // namespace chastov_v_count_words_in_line_mpi \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp b/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp new file mode 100644 index 00000000000..b0912694e98 --- /dev/null +++ b/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp @@ -0,0 +1,84 @@ +// Copyright 2024 Chastov Vyacheslav +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" + +std::vector createTestInput(int n) { + std::vector wordCountInput; + std::string testString = "This is a proposal to evaluate the performance of a word counting algorithm via MPI. 
"; + for (int i = 0; i < n; i++) { + for (unsigned long int j = 0; j < testString.length(); j++) { + wordCountInput.push_back(testString[j]); + } + } + return wordCountInput; +} + +std::vector wordCountInput = createTestInput(2000); + +TEST(chastov_v_count_words_in_line_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector input = wordCountInput; + std::vector wordsFound(1, 0); + // Create TaskData + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskData->outputs_count.emplace_back(wordsFound.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordsFound[0], 30000); + } +} + +TEST(chastov_v_count_words_in_line_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector input = wordCountInput; + std::vector wordsFound(1, 0); + + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(wordsFound.data())); + taskData->outputs_count.emplace_back(wordsFound.size()); + } + + auto testTask = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(wordsFound[0], 30000); + } +} \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp b/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp new file mode 100644 index 00000000000..69778a4f9b3 --- /dev/null +++ b/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp @@ -0,0 +1,86 @@ +// Copyright 2024 Chastov Vyacheslav +#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" + +bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* temp = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = temp[i]; + } + return true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (char c : input_) { + if (c == ' ') { + spacesFound++; + } + } + wordsFound = spacesFound + 1; + return true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + 
reinterpret_cast(taskData->outputs[0])[0] = wordsFound; + return true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + return true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + return (world.rank() == 0) ? (taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1) : true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int blockSize = 0; + if (world.rank() == 0) { + input_ = std ::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + for (unsigned long int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp[i]; + } + blockSize = taskData->inputs_count[0] / world.size(); + } + boost::mpi::broadcast(world, blockSize, 0); + + local_input_.resize(blockSize); + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * blockSize, blockSize); + } + local_input_ = std::vector(input_.begin(), input_.begin() + blockSize); + } else { + world.recv(0, 0, local_input_.data(), blockSize); + } + for (char c : local_input_) { + if (c == ' ') { + localSpaceFound++; + } + } + boost::mpi::reduce(world, localSpaceFound, spacesFound, std::plus<>(), 0); + if (world.rank() == 0) { + wordsFound = spacesFound + 1; + } + return true; +} + +bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = wordsFound; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp b/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp new file mode 100644 index 00000000000..2a667d04383 --- /dev/null +++ b/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp @@ -0,0 +1,185 @@ +#include + +#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" + +std::vector createTestInput(int n) { + std::vector wordCountInput; + std::string firstSentence = "Hello my name is Slava. Now I am a third year student at Lobachevsky University. 
"; + for (int i = 0; i < n - 1; i++) { + for (unsigned long int j = 0; j < firstSentence.length(); j++) { + wordCountInput.push_back(firstSentence[j]); + } + } + std::string lastSentence = "This is a proposal to evaluate the performance of a word counting algorithm via MPI."; + for (unsigned long int j = 0; j < lastSentence.length(); j++) { + wordCountInput.push_back(lastSentence[j]); + } + return wordCountInput; +} + +// Test case to check the behavior of the word counting function when given an empty string +TEST(chastov_v_count_words_in_line_seq, empty_string) { + std::vector input = {}; + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), false); +} + +// Test case to verify that the function correctly identifies that a string consisting only of spaces +TEST(chastov_v_count_words_in_line_seq, handles_only_spaces) { + std::vector inputData = {' ', ' ', ' '}; + std::vector outputData(1, 0); + + auto taskDataPtr = std::make_shared(); + taskDataPtr->inputs.emplace_back(reinterpret_cast(inputData.data())); + taskDataPtr->inputs_count.emplace_back(inputData.size()); + taskDataPtr->outputs.emplace_back(reinterpret_cast(outputData.data())); + taskDataPtr->outputs_count.emplace_back(outputData.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential wordCountTask(taskDataPtr); + ASSERT_TRUE(wordCountTask.validation()); + wordCountTask.pre_processing(); + wordCountTask.run(); + wordCountTask.post_processing(); + + ASSERT_EQ(outputData[0], 0); +} + +// Test case to check the counting functionality for a single word input +TEST(chastov_v_count_words_in_line_seq, word_1) { + std::vector input; + std::string testString = "hello"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 1); +} + +// Test case for counting the number of words in a four word sentence +TEST(chastov_v_count_words_in_line_seq, words_4) { + std::vector input; + std::string testString = "My name is Slava"; + for (unsigned long int j = 0; j < testString.length(); j++) { + input.push_back(testString[j]); + } + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 4); +} + +// Test case to verify the function's ability to 
handle larger input sizes +// The generated string should contain enough words to yield a count of 450 +TEST(chastov_v_count_words_in_line_seq, words_450) { + std::vector input = createTestInput(30); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 450); +} + +// Test case to check the performance and correctness for an even larger input size +// The created string should contain enough words to yield a count of 1500 +TEST(chastov_v_count_words_in_line_seq, words_1500) { + std::vector input = createTestInput(100); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 1500); +} + +// Test case to evaluate the handling of a very large number of words +// The generated string should be such that the word count is expected to be 7500 +TEST(chastov_v_count_words_in_line_seq, words_7500) { + std::vector input = createTestInput(500); + std::vector out(1, 0); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 7500); +} + +// Test case to check the counting of words that include special characters +// The input contains two words separated by a space, and the expected output is 2 +TEST(chastov_v_count_words_in_line_seq, words_with_special_characters) { + std::vector inputData = {'W', 'o', 'r', 'd', '@', '1', ' ', 'W', 'o', 'r', 'd', '#', '2'}; + std::vector outputData(1, 0); + + auto taskDataPtr = std::make_shared(); + taskDataPtr->inputs.emplace_back(reinterpret_cast(inputData.data())); + taskDataPtr->inputs_count.emplace_back(inputData.size()); + taskDataPtr->outputs.emplace_back(reinterpret_cast(outputData.data())); + taskDataPtr->outputs_count.emplace_back(outputData.size()); + + chastov_v_count_words_in_line_seq::TestTaskSequential wordCountTask(taskDataPtr); + ASSERT_TRUE(wordCountTask.validation()); + wordCountTask.pre_processing(); + wordCountTask.run(); + wordCountTask.post_processing(); + + ASSERT_EQ(outputData[0], 2); +} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp b/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp new file mode 100644 index 00000000000..7e9158c2ddd --- /dev/null +++ 
b/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace chastov_v_count_words_in_line_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector inputString; + int wordsFound{}; + int spacesFound{}; +}; + +} // namespace chastov_v_count_words_in_line_seq \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp b/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp new file mode 100644 index 00000000000..4bfad5ccb47 --- /dev/null +++ b/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp @@ -0,0 +1,93 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" + +std::vector createTestInput(int n) { + std::vector wordCountInput; + std::string firstSentence = "Hello my name is Slava. Now I am a third year student at Lobachevsky University. "; + for (int i = 0; i < n - 1; i++) { + for (unsigned long int j = 0; j < firstSentence.length(); j++) { + wordCountInput.push_back(firstSentence[j]); + } + } + std::string lastSentence = "This is a proposal to evaluate the performance of a word counting algorithm via MPI."; + for (unsigned long int j = 0; j < lastSentence.length(); j++) { + wordCountInput.push_back(lastSentence[j]); + } + return wordCountInput; +} + +std::vector wordCountInput = createTestInput(1000); + +TEST(word_count_seq, test_pipeline_run) { + // Create data + std::vector input = wordCountInput; + std::vector word_count(1, 0); + + // Create TaskData + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + taskData->outputs_count.emplace_back(word_count.size()); + + // Create Task + auto testTask = std::make_shared(taskData); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(word_count[0], 15000); +} + +TEST(word_count_seq, test_task_run) { + // Create data + std::vector input = wordCountInput; + std::vector word_count(1, 0); + + // Create TaskData + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(input.data())); + taskData->inputs_count.emplace_back(input.size()); + taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); + taskData->outputs_count.emplace_back(word_count.size()); + + // Create Task + auto testTask = std::make_shared(taskData); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1000; + const 
auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(word_count[0], 15000); +} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp b/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp new file mode 100644 index 00000000000..9193351eed5 --- /dev/null +++ b/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp @@ -0,0 +1,46 @@ +#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" + +bool chastov_v_count_words_in_line_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + inputString = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + inputString[i] = tmp[i]; + } + return true; +} + +bool chastov_v_count_words_in_line_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool chastov_v_count_words_in_line_seq::TestTaskSequential::run() { + internal_order_test(); + spacesFound = 0; + wordsFound = 0; + + bool inWord = false; + + for (char c : inputString) { + if (std::isspace(c) != 0) { + if (inWord) { + inWord = false; + spacesFound++; + } + } else { + if (!inWord) { + inWord = true; + wordsFound++; + } + } + } + + return true; +} + +bool chastov_v_count_words_in_line_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = wordsFound; + return true; +} \ No newline at end of file From c8ea374383769c234df8ecd97caed388d45e8436 Mon Sep 17 00:00:00 2001 From: Irina2004-tech <111091810+Irina2004-tech@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:32:14 +0300 Subject: [PATCH 120/155] =?UTF-8?q?=D0=9A=D1=83=D0=B4=D1=80=D1=8F=D1=88?= =?UTF-8?q?=D0=BE=D0=B2=D0=B0=20=D0=98=D1=80=D0=B8=D0=BD=D0=B0.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=209.=20=D0=A1=D0=BA=D0=B0=D0=BB=D1=8F=D1=80?= =?UTF-8?q?=D0=BD=D0=BE=D0=B5=20=D0=BF=D1=80=D0=BE=D0=B8=D0=B7=D0=B2=D0=B5?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE?= =?UTF-8?q?=D1=80=D0=BE=D0=B2=20(#167)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed seq_perf_tests https://github.com/learning-process/ppc-2024-autumn/pull/69 --- .../vectorDotProductMPIFuncTests.cpp | 277 ++++++++++++++++++ .../include/vectorDotProductMPI.hpp | 37 +++ .../vectorDotProductMPIPerfTests.cpp | 89 ++++++ .../src/vectorDotProductMPI.cpp | 121 ++++++++ .../vectorDotProductSeqFuncTests.cpp | 167 +++++++++++ .../include/vectorDotProductSeq.hpp | 20 ++ .../vectorDotProductSeqPerfTests.cpp | 70 +++++ .../src/vectorDotProductSeq.cpp | 43 +++ 8 files changed, 824 insertions(+) create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp create mode 100644 
tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp create mode 100644 tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp create mode 100644 tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp new file mode 100644 index 00000000000..e4dc493b878 --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/func_tests/vectorDotProductMPIFuncTests.cpp @@ -0,0 +1,277 @@ +#include + +#include + +#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" + +static int seedOffset = 0; + +std::vector GetRandomVector(int size) { + std::vector vector(size); + std::srand(static_cast(time(nullptr)) + ++seedOffset); + for (int i = 0; i < size; ++i) { + vector[i] = std::rand() % 100 + 1; + } + return vector; +} + +TEST(kudryashova_i_vector_dot_product_mpi, mpi_vector_dot_product_func) { + std::vector vector1 = {8, 7, 6}; + std::vector vector2 = {3, 2, 1}; + ASSERT_EQ(44, kudryashova_i_vector_dot_product_mpi::vectorDotProduct(vector1, vector2)); +} + +TEST(kudryashova_i_vector_dot_product_mpi, dot_product_vector_120) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference[0], result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, dot_product_vector_360) { + 
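+  // Same check as dot_product_vector_120 above, scaled up to 360 elements per vector.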
boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 360; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference[0], result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_equal) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_not_equal_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector + 1); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); 
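+    // The two input vectors deliberately differ in length (101 vs 100), so validation is expected to fail.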
+ taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_vectors_dot_product) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 100; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {}; + std::vector vector2 = {}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, check_dot_product_empty_and_nonempty_vectors) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {}; + std::vector vector2 = {1}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, dot_product_vector_1_with_zero) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::vector vector1 = {0}; + std::vector vector2 = {1}; + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + 
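+      // Register the raw bytes of each input vector with the TaskData descriptor.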
taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, dot_product_vector_1) { + boost::mpi::communicator world; + std::vector> global_vector; + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 1; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vector[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vector[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference[0], result[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp b/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp new file mode 100644 index 00000000000..75d896be292 --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp @@ -0,0 +1,37 @@ +#pragma once +#include +#include +#include +#include + +#include "core/task/include/task.hpp" +namespace kudryashova_i_vector_dot_product_mpi { +int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int result{}; +}; +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; 
+ bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input1_, local_input2_; + int result{}; + boost::mpi::communicator world; + unsigned int delta; +}; +} // namespace kudryashova_i_vector_dot_product_mpi diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp new file mode 100644 index 00000000000..d19b5236d3c --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductMPIPerfTests.cpp @@ -0,0 +1,89 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" + +static int seedOffset = 0; +std::vector GetRandomVector(int size) { + std::vector vector(size); + std::srand(static_cast(time(nullptr)) + ++seedOffset); + for (int i = 0; i < size; ++i) { + vector[i] = std::rand() % 100 + 1; + } + return vector; +} + +TEST(kudryashova_i_vector_dot_product_mpi, test_pipeline_run) { + const int count = 15000000; + boost::mpi::communicator world; + std::vector> global_vector; + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + std::vector result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + } +} + +TEST(kudryashova_i_vector_dot_product_mpi, test_task_run) { + const int count_size_vector = 15000000; + boost::mpi::communicator world; + std::vector> global_vector; + std::vector vector1 = GetRandomVector(count_size_vector); + std::vector vector2 = GetRandomVector(count_size_vector); + std::vector result(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_vector = {vector1, vector2}; + for (size_t i = 0; i < global_vector.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vector[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vector[0].size()); + taskDataPar->inputs_count.emplace_back(global_vector[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataPar->outputs_count.emplace_back(result.size()); + } + auto testMpiTaskParallel = std::make_shared(taskDataPar); + 
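+  // Run one full pass (validation through post-processing) before the timed measurements.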
ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product_mpi::vectorDotProduct(global_vector[0], global_vector[1]), result[0]); + } +} diff --git a/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp b/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp new file mode 100644 index 00000000000..f5c4fbcdc10 --- /dev/null +++ b/tasks/mpi/kudryashova_i_vector_dot_product/src/vectorDotProductMPI.cpp @@ -0,0 +1,121 @@ +#include "mpi/kudryashova_i_vector_dot_product/include/vectorDotProductMPI.hpp" + +#include + +int kudryashova_i_vector_dot_product_mpi::vectorDotProduct(const std::vector& vector1, + const std::vector& vector2) { + long long result = 0; + for (unsigned long i = 0; i < vector1.size(); i++) result += vector1[i] * vector2[i]; + return result; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + input_.resize(taskData->inputs.size()); + for (unsigned long i = 0; i < input_.size(); ++i) { + auto* tempPtr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + std::copy(tempPtr, tempPtr + taskData->inputs_count[i], input_[i].begin()); + } + return true; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && + taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (unsigned long i = 0; i < input_[0].size(); i++) { + result += input_[1][i] * input_[0][i]; + } + return true; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result; + return true; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + if ((int)(taskData->inputs_count[0]) < world.size()) { + delta = taskData->inputs_count[0]; + } + } + if (world.rank() == 0) { + input_.resize(taskData->inputs.size()); + for (size_t i = 0; i < taskData->inputs.size(); ++i) { + if (taskData->inputs[i] == nullptr || taskData->inputs_count[i] == 0) { + return false; + } + input_[i].resize(taskData->inputs_count[i]); + int* source_ptr = reinterpret_cast(taskData->inputs[i]); + + std::copy(source_ptr, source_ptr + taskData->inputs_count[i], input_[i].begin()); + } + } + return true; +} + +bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return 
(taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+           (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+           taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) &&
+           taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0;
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  broadcast(world, delta, 0);
+  if (world.rank() == 0) {
+    for (int proc = 1; proc < world.size(); ++proc) {
+      world.send(proc, 0, input_[0].data() + proc * delta, delta);
+      world.send(proc, 1, input_[1].data() + proc * delta, delta);
+    }
+  }
+  local_input1_.resize(delta);
+  local_input2_.resize(delta);
+  if (world.rank() == 0) {
+    std::copy(input_[0].begin(), input_[0].begin() + delta, local_input1_.begin());
+    std::copy(input_[1].begin(), input_[1].begin() + delta, local_input2_.begin());
+  } else {
+    world.recv(0, 0, local_input1_.data(), delta);
+    world.recv(0, 1, local_input2_.data(), delta);
+  }
+  int local_result = std::inner_product(local_input1_.begin(), local_input1_.end(), local_input2_.begin(), 0);
+  std::vector<int> full_results;
+  gather(world, local_result, full_results, 0);
+
+  if (world.rank() == 0) {
+    result = std::accumulate(full_results.begin(), full_results.end(), 0);
+    // The evenly sized segments cover only world.size() * delta elements; fold
+    // the remaining tail of the vectors into the result so no element is lost
+    // when the length is not divisible by the number of processes.
+    std::size_t processed = static_cast<std::size_t>(delta) * world.size();
+    if (processed < input_[0].size()) {
+      result += std::inner_product(input_[0].begin() + processed, input_[0].end(),
+                                   input_[1].begin() + processed, 0);
+    }
+  }
+  if (world.rank() == 0 && (int)(taskData->inputs_count[0]) < world.size()) {
+    result = std::inner_product(input_[0].begin(), input_[0].end(), input_[1].begin(), 0);
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if (!taskData->outputs.empty()) {
+      reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp
new file mode 100644
index 00000000000..bcee8f9a646
--- /dev/null
+++ b/tasks/seq/kudryashova_i_vector_dot_product/func_tests/vectorDotProductSeqFuncTests.cpp
@@ -0,0 +1,167 @@
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+#include <ctime>
+#include <vector>
+
+#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp"
+
+static int seedOffset = 0;
+std::vector<int> GetRandomVector(int size) {
+  std::vector<int> vector(size);
+  std::srand(static_cast<unsigned>(time(nullptr)) + ++seedOffset);
+  for (int i = 0; i < size; ++i) {
+    vector[i] = std::rand() % 100 + 1;
+  }
+  return vector;
+}
+
+TEST(kudryashova_i_vector_dot_product_seq, check_vector_dot_product_true) {
+  // Create data
+  std::vector<int> vector1 = {1, 8, 14};
+  std::vector<int> vector2 = {3, 6, 5};
+  ASSERT_EQ(121, kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2));
+}
+
+TEST(kudryashova_i_vector_dot_product_seq, dot_product_vector_size_1) {
+  const int count = 1;
+  // Create data
+  std::vector<int> vector1 = GetRandomVector(count);
+  std::vector<int> vector2 = GetRandomVector(count);
+  std::vector<int> out(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector1.data()));
+  taskDataSeq->inputs_count.emplace_back(vector1.size());
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector2.data()));
+  taskDataSeq->inputs_count.emplace_back(vector2.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+  // Create Task
+ kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, dot_product_vector_size_50) { + const int count = 50; + // Create data + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, dot_product_vector_size_120) { + const int count = 120; + // Create data + std::vector out(1, 0); + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_equal_vectors) { + const int count = 10; + // Create data + std::vector out(1, 0); + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(kudryashova_i_vector_dot_product_seq, checks_not_equal_vector) { + const int count = 10; + // Create data + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count + 1); + std::vector out(1, 0); + // Create TaskData + 
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_empty_vectors) { + // Create data + std::vector vector1 = {}; + std::vector vector2 = {}; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(kudryashova_i_vector_dot_product_seq, check_run_true) { + // Create data + std::vector out(1, 0); + std::vector vector1 = {1, 8, 14}; + std::vector vector2 = {3, 6, 5}; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kudryashova_i_vector_dot_product::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(121, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp b/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp new file mode 100644 index 00000000000..1deb397e424 --- /dev/null +++ b/tasks/seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp @@ -0,0 +1,20 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" +namespace kudryashova_i_vector_dot_product { +int vectorDotProduct(const std::vector& vector1, const std::vector& vector2); +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_{}; + int result{}; +}; +} // namespace kudryashova_i_vector_dot_product diff --git a/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp b/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp new file mode 100644 index 00000000000..6a6e3051f7c --- /dev/null +++ 
b/tasks/seq/kudryashova_i_vector_dot_product/perf_tests/vectorDotProductSeqPerfTests.cpp @@ -0,0 +1,70 @@ +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp" + +static int seedOffset = 0; +std::vector GetRandomVector(int size) { + std::vector vector(size); + std::srand(static_cast(time(nullptr)) + ++seedOffset); + for (int i = 0; i < size; ++i) { + vector[i] = std::rand() % 100 + 1; + } + return vector; +} + +TEST(kudryashova_i_vector_dot_product_seq, test_pipeline_run) { + const int count_size = 15000000; + std::vector vector1 = GetRandomVector(count_size); + std::vector vector2 = GetRandomVector(count_size); + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} + +TEST(kudryashova_i_vector_dot_product_seq, test_task_run) { + const int count = 15000000; + std::vector vector1 = GetRandomVector(count); + std::vector vector2 = GetRandomVector(count); + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(vector2.data())); + taskDataSeq->inputs_count.emplace_back(vector1.size()); + taskDataSeq->inputs_count.emplace_back(vector2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + auto testTaskSequential = std::make_shared(taskDataSeq); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(kudryashova_i_vector_dot_product::vectorDotProduct(vector1, vector2), out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp b/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp new file mode 100644 index 00000000000..50c68098ead --- /dev/null +++ 
b/tasks/seq/kudryashova_i_vector_dot_product/src/vectorDotProductSeq.cpp
@@ -0,0 +1,43 @@
+#include "seq/kudryashova_i_vector_dot_product/include/vectorDotProductSeq.hpp"
+
+int kudryashova_i_vector_dot_product::vectorDotProduct(const std::vector<int>& vector1,
+                                                       const std::vector<int>& vector2) {
+  long long result = 0;
+  for (unsigned long i = 0; i < vector1.size(); i++) result += vector1[i] * vector2[i];
+  return static_cast<int>(result);
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::pre_processing() {
+  internal_order_test();
+
+  input_.resize(taskData->inputs.size());
+  for (unsigned long i = 0; i < taskData->inputs.size(); ++i) {
+    int* source_ptr = reinterpret_cast<int*>(taskData->inputs[i]);
+    input_[i].resize(taskData->inputs_count[i]);
+    std::copy(source_ptr, source_ptr + taskData->inputs_count[i], input_[i].begin());
+  }
+  result = 0;
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+         (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+         taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) &&
+         taskData->outputs.size() == 1 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::run() {
+  internal_order_test();
+  for (unsigned long i = 0; i < input_[0].size(); i++) {
+    result += input_[1][i] * input_[0][i];
+  }
+  return true;
+}
+
+bool kudryashova_i_vector_dot_product::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
\ No newline at end of file

From 63b55625096e4ee40c91c55d8d57a3ce21709e52 Mon Sep 17 00:00:00 2001
From: 0xG00SE <61384845+DSolo03@users.noreply.github.com>
Date: Wed, 6 Nov 2024 18:35:58 +0300
Subject: [PATCH 121/155] =?UTF-8?q?=D0=A1=D0=BE=D0=BB=D0=BE=D0=B2=D1=8C?=
 =?UTF-8?q?=D0=B5=D0=B2=20=D0=94=D0=B0=D0=BD=D0=B8=D0=BB=D0=B0.=20=D0=97?=
 =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?=
 =?UTF-8?q?=D0=B0=D0=BD=D1=82=203.=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC?=
 =?UTF-8?q?=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87?=
 =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD?=
 =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0?=
 =?UTF-8?q?.=20(#190)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

_Fixed #17 that was reverted in #179_

Sequential task: walk over the elements of the vector, comparing each one
with a variable that holds the running maximum. Whenever the current
element is greater than that variable, its value is written into the
variable.

MPI task: the input data is split into as many fragments as there are
available processes. Every fragment except the first is handed to the
corresponding process for execution; the first fragment is processed by
the same process that partitioned the data. Each process finds the
maximum of its own fragment, exactly as in the sequential task, and the
per-process maxima are combined with the reduce function using the
maximum operator. The resulting maximum is stored in the result variable
and returned.
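
For reference, the scatter/reduce-maximum pattern described above can be
reproduced outside the repository's Task framework. The sketch below is an
illustrative, self-contained program and is not part of this patch: it pads
the input with INT_MIN (a neutral element for max) so every process gets an
equal share, and it uses the collective scatter instead of the point-to-point
send/recv that the task implementation performs by hand.

#include <boost/mpi.hpp>

#include <algorithm>
#include <climits>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> data;
  int chunk = 0;
  if (world.rank() == 0) {
    data = {5, -3, 42, 7, 0, 19, -8, 11};  // sample input, any values work
    // Pad so the vector divides evenly among the processes.
    while (data.size() % world.size() != 0) data.push_back(INT_MIN);
    chunk = static_cast<int>(data.size()) / world.size();
  }
  boost::mpi::broadcast(world, chunk, 0);

  // Each process receives its contiguous segment of the flattened vector.
  std::vector<int> local(chunk);
  boost::mpi::scatter(world, data.data(), local.data(), chunk, 0);

  // Local maximum first, then a reduce with the maximum operator,
  // which is the same collective the parallel task relies on.
  int local_max = *std::max_element(local.begin(), local.end());
  int global_max = 0;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);

  if (world.rank() == 0) {
    std::cout << "max = " << global_max << '\n';
  }
  return 0;
}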
--- .../solovyev_d_vector_max/func_tests/main.cpp | 103 ++++++++++++ .../solovyev_d_vector_max/include/header.hpp | 48 ++++++ .../solovyev_d_vector_max/perf_tests/main.cpp | 98 +++++++++++ .../mpi/solovyev_d_vector_max/src/source.cpp | 114 +++++++++++++ .../solovyev_d_vector_max/func_tests/main.cpp | 153 ++++++++++++++++++ .../solovyev_d_vector_max/include/header.hpp | 25 +++ .../solovyev_d_vector_max/perf_tests/main.cpp | 92 +++++++++++ .../seq/solovyev_d_vector_max/src/source.cpp | 44 +++++ 8 files changed, 677 insertions(+) create mode 100644 tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp create mode 100644 tasks/mpi/solovyev_d_vector_max/include/header.hpp create mode 100644 tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp create mode 100644 tasks/mpi/solovyev_d_vector_max/src/source.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/func_tests/main.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/include/header.hpp create mode 100644 tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp create mode 100644 tasks/seq/solovyev_d_vector_max/src/source.cpp diff --git a/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp new file mode 100644 index 00000000000..0a76ffb660d --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp @@ -0,0 +1,103 @@ +#include + +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" +namespace solovyev_d_vector_max_mpi { +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} +} // namespace solovyev_d_vector_max_mpi +TEST(solovyev_d_vector_max_mpi, Test_Max) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 240; + global_vec = solovyev_d_vector_max_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + 
std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = solovyev_d_vector_max_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} diff --git a/tasks/mpi/solovyev_d_vector_max/include/header.hpp b/tasks/mpi/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..0b49b459cad --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { + +int vectorMax(std::vector> v); + +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +class VectorMaxMPIParallel : public ppc::core::Task { + public: + explicit VectorMaxMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data, localData; + int result{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..5202593af55 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,98 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/solovyev_d_vector_max/include/header.hpp" +namespace solovyev_d_vector_max_mpi { +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} +} // namespace solovyev_d_vector_max_mpi +TEST(solovyev_d_vector_max_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector 
global_vec; + std::vector global_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = solovyev_d_vector_max_mpi::getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = solovyev_d_vector_max_mpi::getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/src/source.cpp b/tasks/mpi/solovyev_d_vector_max/src/source.cpp new file mode 100644 index 00000000000..522d1bb92cf --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/src/source.cpp @@ -0,0 +1,114 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { + int m = std::numeric_limits::min(); + for (std::string::size_type i = 0; i < v.size(); i++) { + if (v[i] > m) { + m = v[i]; + } + } + return m; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::pre_processing() { + internal_order_test(); + + // Determine number of vector elements per process + unsigned int 
delta = 0;
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[0] / world.size();
+  }
+
+  // Share delta between all processes
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    // Convert input data to vector
+    int* input_ = reinterpret_cast<int*>(taskData->inputs[0]);
+    data = std::vector<int>(input_, input_ + taskData->inputs_count[0]);
+
+    // Send each of the processes its portion of data
+    for (int process = 1; process < world.size(); process++) {
+      world.send(process, 0, data.data() + process * delta, delta);
+    }
+  }
+
+  // Initialize local vector
+  localData = std::vector<int>(delta);
+  if (world.rank() == 0) {
+    // Get the data directly if we are in process zero
+    localData = std::vector<int>(data.begin(), data.begin() + delta);
+  } else {
+    // Otherwise, receive the data
+    world.recv(0, 0, localData.data(), delta);
+  }
+
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0);
+  }
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::run() {
+  internal_order_test();
+  int localResult;
+
+  // Search for maximum vector element in current process data
+  localResult = vectorMax(localData);
+
+  // The evenly split segments cover only world.size() * delta elements; let
+  // process zero also scan the tail that is left over when the size is not
+  // divisible by the number of processes, so that no element is missed.
+  if (world.rank() == 0) {
+    std::vector<int> tail(data.begin() + (data.size() / world.size()) * world.size(), data.end());
+    if (!tail.empty()) {
+      int tailMax = vectorMax(tail);
+      if (tailMax > localResult) {
+        localResult = tailMax;
+      }
+    }
+  }
+
+  // Search for maximum vector element using all processes data
+  reduce(world, localResult, result, boost::mpi::maximum<int>(), 0);
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  }
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() {
+  internal_order_test();
+
+  // Init data vector
+  int* input_ = reinterpret_cast<int*>(taskData->inputs[0]);
+  data = std::vector<int>(input_, input_ + taskData->inputs_count[0]);
+
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0);
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() {
+  internal_order_test();
+
+  // Determine maximum value of data vector
+  result = vectorMax(data);
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp
new file mode 100644
index 00000000000..87f8b4ac51c
--- /dev/null
+++ b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp
@@ -0,0 +1,153 @@
+#include <gtest/gtest.h>
+
+#include <random>
+#include <vector>
+
+#include "seq/solovyev_d_vector_max/include/header.hpp"
+namespace solovyev_d_vector_max_mpi {
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+}  // namespace solovyev_d_vector_max_mpi
+TEST(solovyev_d_vector_max_mpi, Test_Empty) {
+  // Create data
+  std::vector<int> in(0, 0);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), false); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10) { + const int count = 10; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_100) { + const int count = 20; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_1000) { + const int count = 50; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10000) { + const int count = 70; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, 
Test_Max_100000) { + const int count = 100; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} diff --git a/tasks/seq/solovyev_d_vector_max/include/header.hpp b/tasks/seq/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..712e45ed1ac --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,25 @@ + +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { +int vectorMax(std::vector> v); +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..54035bcb984 --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,92 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/solovyev_d_vector_max/include/header.hpp" +namespace solovyev_d_vector_max_mpi { +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} +} // namespace solovyev_d_vector_max_mpi +TEST(solovyev_d_vector_max_mpi, test_pipeline_run) { + const int count = 12000000; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + 
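+  // print_perf_statistic reports the timings collected over the num_running
+  // iterations configured above; the assertion below then checks that the
+  // planted maximum (1024 at the vector midpoint) was found.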
ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, test_task_run) { + const int count = 12000000; + + // Create data + std::vector in = solovyev_d_vector_max_mpi::getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, out[0]); +} diff --git a/tasks/seq/solovyev_d_vector_max/src/source.cpp b/tasks/seq/solovyev_d_vector_max/src/source.cpp new file mode 100644 index 00000000000..2b44606f4eb --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/src/source.cpp @@ -0,0 +1,44 @@ +#include +#include +#include + +#include "seq/solovyev_d_vector_max/include/header.hpp" + +int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { + int m = std::numeric_limits::min(); + for (std::string::size_type i = 0; i < v.size(); i++) { + if (v[i] > m) { + m = v[i]; + } + } + return m; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() { + internal_order_test(); + + // Init data vector + int* input_ = reinterpret_cast(taskData->inputs[0]); + data = std::vector(input_, input_ + taskData->inputs_count[0]); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() { + internal_order_test(); + + // Determine maximum value of data vector + result = vectorMax(data); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result; + return true; +} \ No newline at end of file From a69a0c5bbdb2477df83aab67884517728686deba Mon Sep 17 00:00:00 2001 From: ArinaTs <114129787+ArinaTs@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:21:37 +0300 Subject: [PATCH 122/155] =?UTF-8?q?=D0=A6=D0=B5=D0=BB=D0=B8=D0=BA=D0=BE?= =?UTF-8?q?=D0=B2=D0=B0=20=D0=90=D1=80=D0=B8=D0=BD=D0=B0.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=202.=20=D0=92=D1=8B=D1=87=D0=B8=D1=81=D0=BB=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D1=81=D1=80=D0=B5=D0=B4=D0=BD=D0=B5=D0=B3?= =?UTF-8?q?=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D1=8F=20?= =?UTF-8?q?=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2?= =?UTF-8?q?=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#99)?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential task: walk over all elements of the vector once, accumulating
their sum, then divide the sum by the number of elements in the vector.

MPI task: the given vector is split into sub-vectors, each of which is
handed to a different process. The values returned by the processes are
added together inside the main process, and the total is then divided by
the number of elements in the vector.

---------

Co-authored-by: ArinaTs
---
 .../func_tests/main.cpp                       |  51 +++++
 .../include/ops_mpi.hpp                       |  44 ++++
 .../perf_tests/main.cpp                       |  49 +++++
 .../src/ops_mpi.cpp                           | 107 ++++++++++
 .../func_tests/main.cpp                       | 193 ++++++++++++++++++
 .../include/ops_seq.hpp                       |  23 +++
 .../perf_tests/main.cpp                       |  81 ++++++++
 .../src/ops_seq.cpp                           |  40 ++++
 8 files changed, 588 insertions(+)
 create mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..b3693b83a48
--- /dev/null
+++ b/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,51 @@
+// Copyright 2024 Tselikova Arina
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp"
+
+TEST(tselikova_a_average_of_vector_elements_mpi, Test_Average_Vector) {
+  boost::mpi::communicator world;
+  std::vector<int> large_vec(1000, 1);
+  std::vector<float> global_avg{0.0f};
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(large_vec.data()));
+    taskDataPar->inputs_count.emplace_back(large_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_avg.data()));
+    taskDataPar->outputs_count.emplace_back(global_avg.size());
+  }
+
+  tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    float reference_avg = 1.0f;
+
+    ASSERT_FLOAT_EQ(global_avg[0], reference_avg);
+  }
+}
+
+TEST(tselikova_a_average_of_vector_elements_mpi, Test_EmptyVector) {
+  boost::mpi::communicator world;
+  std::vector<int> empty_vec;
+  std::vector<float> global_avg{0.0f};
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(empty_vec.data()));
+    taskDataPar->inputs_count.emplace_back(empty_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_avg.data()));
+    taskDataPar->outputs_count.emplace_back(global_avg.size());
+
+    tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+    ASSERT_EQ(testMpiTaskParallel.validation(), false);
+  }
+}
\ No
newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..a6f2b0b2bde --- /dev/null +++ b/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace tselikova_a_average_of_vector_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + int sum_; + boost::mpi::communicator world; + int total_elements{}; +}; + +} // namespace tselikova_a_average_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..f07619f5078 --- /dev/null +++ b/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,49 @@ +// Copyright 2024 Tselikova Arina +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp" + +TEST(mpi_example_perf_test1, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_avg(1, 0.0f); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_avg.data())); + taskDataPar->outputs_count.emplace_back(global_avg.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_FLOAT_EQ(1.0f, global_avg[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..e4785c04cee --- /dev/null +++ 
b/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp
@@ -0,0 +1,107 @@
+// Copyright 2024 Tselikova Arina
+#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <functional>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  int* tmp = reinterpret_cast<int*>(taskData->inputs[0]);
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  for (std::size_t i = 0; i < static_cast<std::size_t>(taskData->inputs_count[0]); i++) {
+    input_[i] = tmp[i];
+  }
+  res = 0;
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] >= 1 && taskData->outputs_count[0] == 1;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  int sum = 0;
+  for (std::size_t i = 0; i < input_.size(); i++) {
+    sum += input_[i];
+  }
+  res = static_cast<float>(sum) / input_.size();
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<float*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    input_ = std::vector<int>(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+    for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+  }
+
+  res = 0;
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] >= 1;
+  }
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  unsigned int delta = 0;
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[0] / world.size();
+    total_elements = taskData->inputs_count[0];
+  }
+  broadcast(world, delta, 0);
+  broadcast(world, total_elements, 0);
+  if (world.rank() == 0) {
+    for (int proc = 1; proc < world.size(); proc++) {
+      unsigned int start_index = proc * delta;
+      unsigned int count = (proc == world.size() - 1) ? (total_elements - start_index) : delta;
+      world.send(proc, 0, input_.data() + start_index, count);
+    }
+  }
+  // Each rank derives the size of its own portion the same way the sender
+  // does, so the last process posts a receive that matches the larger tail
+  // segment instead of a fixed-size one that would truncate the message.
+  unsigned int local_count = delta;
+  if (world.rank() == world.size() - 1 && world.rank() != 0) {
+    local_count = total_elements - world.rank() * delta;
+  }
+  local_input_ = std::vector<int>(local_count);
+  if (world.rank() == 0) {
+    local_input_ = std::vector<int>(input_.begin(), input_.begin() + delta);
+  } else {
+    world.recv(0, 0, local_input_.data(), local_count);
+  }
+
+  int local_sum = 0;
+  for (unsigned int i = 0; i < local_input_.size(); i++) {
+    local_sum += local_input_[i];
+  }
+  reduce(world, local_sum, sum_, std::plus<>(), 0);
+  return true;
+}
+
+bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    res = static_cast<float>(sum_) / total_elements;
+    reinterpret_cast<float*>(taskData->outputs[0])[0] = res;
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..3cb5f2a372d
--- /dev/null
+++ b/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,193 @@
+// Copyright 2024 Tselikova Arina
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp"
+
+TEST(tselikova_a_average_of_vector_elements, check_vector_with_similar_elem) {
+  const int count = 3;
+
+  // Create data
+  std::vector<int> in(10, count);
+  std::vector<float> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(count, out[0]);
+}
+
+TEST(tselikova_a_average_of_vector_elements, check_vector_with_integer_value) {
+  // Create data
+  std::vector<int> in{2, 4, 6, 8, 10};
+  std::vector<float> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(6, out[0]);
+}
+
+TEST(tselikova_a_average_of_vector_elements, check_empty_vector) {
+  // Create data
+  std::vector<int> in(0);
+  std::vector<float> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), false);
+}
+
+TEST(tselikova_a_average_of_vector_elements, check_vector_with_one_elem) {
+  // Create data
+  std::vector<int> in{5};
+  std::vector<float> out(1, 0);
+ + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(5, out[0]); +} + +TEST(tselikova_a_average_of_vector_elements, check_vector_with_two_elem) { + // Create data + std::vector in{2, 6}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(4, out[0]); +} + +TEST(tselikova_a_average_of_vector_elements, check_vector_with_three_elem) { + // Create data + std::vector in{1, 2, 3}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(2, out[0]); +} + +TEST(tselikova_a_average_of_vector_elements, check_vector_with_fraction_value) { + // Create data + std::vector in{7, 9, 13}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_NEAR(9.67, out[0], 0.1); +} + +TEST(tselikova_a_average_of_vector_elements, check_vector_with_negative_elem) { + // Create data + std::vector in{-2, -4, -6, -8, -10}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), 
true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(-6, out[0]); +} + +TEST(tselikova_a_average_of_vector_elements, check_vector_with_zero_av) { + // Create data + std::vector in{2, -2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp b/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..88145f8e4bc --- /dev/null +++ b/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Copyright 2024 Tselikova Arina +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace tselikova_a_average_of_vector_elements { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_{}; + float res{}; +}; + +} // namespace tselikova_a_average_of_vector_elements \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..08111defc44 --- /dev/null +++ b/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,81 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp" + +TEST(tselikova_a_average_of_vector_elements, test_pipeline_run) { + const int count = 100; + + // Create data + std::vector in(1, count); + std::vector out(1, 0.0f); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_FLOAT_EQ(count, out[0]); 
+} + +TEST(tselikova_a_average_of_vector_elements, test_task_run) { + const int count = 100; + + // Create data + std::vector in(1, count); + std::vector out(1, 0.0f); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..d3ad1299fec --- /dev/null +++ b/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,40 @@ +// Copyright 2024 Tselikova Arina +#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool tselikova_a_average_of_vector_elements::TestTaskSequential::pre_processing() { + internal_order_test(); + int* tmp = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(taskData->inputs_count[0]); + for (std::size_t i = 0; i < (std::size_t)taskData->inputs_count[0]; i++) { + input_[i] = tmp[i]; + } + res = 0; + return true; +} + +bool tselikova_a_average_of_vector_elements::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool tselikova_a_average_of_vector_elements::TestTaskSequential::run() { + internal_order_test(); + int sum = 0; + for (std::size_t i = 0; i < input_.size(); i++) { + sum += input_[i]; + } + res = static_cast(sum) / input_.size(); + return true; +} + +bool tselikova_a_average_of_vector_elements::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From 54d1a19e3cb0770a3d4e3fcf1d7b3dd765e45ba9 Mon Sep 17 00:00:00 2001 From: ValeraDanger <47420979+ValeraDanger@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:22:56 +0300 Subject: [PATCH 123/155] =?UTF-8?q?=D0=9F=D0=B5=D1=82=D1=80=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=9E=D0=BB=D0=B5=D0=B3.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87?= =?UTF-8?q?=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=205.?= =?UTF-8?q?=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5?= =?UTF-8?q?=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=87=D0=B5=D1=80=D0=B5?= =?UTF-8?q?=D0=B4=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B9=20=D0=B7=D0=BD=D0=B0?= =?UTF-8?q?=D0=BA=D0=BE=D0=B2=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B9=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8=D1=85=20?= 
=?UTF-8?q?=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2?= =?UTF-8?q?=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0=20=20(#102)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Description of the sequential task** The task is to count the number of sign alternations in an array of numbers. The sequential program walks over every element of the array, determines the sign of the current and the previous element, and increments the alternation counter whenever the sign changes. The result is the total number of sign alternations in the array. This implementation uses a single process and performs all computations on one processor core (a compact sketch of this scan appears below). **Description of the MPI task** To speed up the computation, the task is parallelized with MPI. The array is split into parts, and each process receives its own segment to analyze. Each process counts the sign alternations inside its segment. To count the alternations on the border between segments correctly, the processes exchange their edge elements, which accounts for possible alternations between neighboring segments. Finally, every process sends its alternation count to the root process, where the results are summed to obtain the total number of alternations in the array. --- .../func_tests/main.cpp | 344 ++++++++++++++++++ .../include/ops_mpi.hpp | 44 +++ .../perf_tests/main.cpp | 66 ++++ .../src/ops_mpi.cpp | 145 ++++++++ .../func_tests/main.cpp | 134 +++++++ .../include/ops_seq.hpp | 27 ++ .../perf_tests/main.cpp | 93 +++++ .../src/ops_seq.cpp | 42 +++ 8 files changed, 895 insertions(+) create mode 100644 tasks/mpi/petrov_o_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp create mode 100644 tasks/mpi/petrov_o_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/mpi/petrov_o_num_of_alternations_signs/src/ops_mpi.cpp create mode 100644 tasks/seq/petrov_o_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp create mode 100644 tasks/seq/petrov_o_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/seq/petrov_o_num_of_alternations_signs/src/ops_seq.cpp diff --git a/tasks/mpi/petrov_o_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/petrov_o_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..34774c2d037 --- /dev/null +++ b/tasks/mpi/petrov_o_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,344 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp" + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_Simple) { + std::vector input = {1, -2, 3, -4, 5}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 4); +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_AllPositive) { + std::vector input = {1, 2, 3, 4, 5}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); +
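// A compact sketch of the sequential scan described in the commit message
// above; standalone illustration only (the patch's real implementation is in
// src/ops_seq.cpp further down), assuming a plain std::vector<int> input.
#include <cstddef>
#include <vector>

int count_sign_alternations(const std::vector<int>& v) {
  int count = 0;
  for (std::size_t i = 1; i < v.size(); ++i) {
    // XOR of the two "is negative" flags is true exactly when the sign flips.
    if ((v[i] < 0) ^ (v[i - 1] < 0)) {
      ++count;
    }
  }
  return count;  // e.g. {1, -2, 3, -4, 5} -> 4
}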
taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_AllNegative) { + std::vector input = {-1, -2, -3, -4, -5}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_Empty) { + std::vector input = {}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_OneElement) { + std::vector input = {1}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_LargeInput) { + const int size = 1000; + std::vector input(size); + std::iota(input.begin(), input.end(), 1); + for (size_t i = 0; i < input.size(); ++i) { + if (i % 2 != 0) { + input[i] *= -1; + } + } + + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], static_cast(input.size() - 1)); +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_Simple) { + boost::mpi::communicator world; + + std::vector input = {1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16, 17, -18, 19, -20}; + std::vector output(1); + + 
std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], 19); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_AllPositive) { + boost::mpi::communicator world; + + std::vector input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], 0); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_AllNegative) { + boost::mpi::communicator world; + + std::vector input = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19, -20}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], 0); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_Empty) { + boost::mpi::communicator world; + + std::vector input = {}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], 0); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_OneElement) { + boost::mpi::communicator world; + + std::vector input = {1}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + 
ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], 0); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_LargeInput) { + boost::mpi::communicator world; + + const int size = 1000; + std::vector input(size); + std::iota(input.begin(), input.end(), 1); + for (size_t i = 0; i < input.size(); ++i) { + if (i % 2 != 0) { + input[i] *= -1; + } + } + + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + if (world.rank() == 0) { + ASSERT_EQ(output[0], static_cast(input.size() - 1)); + } +} + +TEST(petrov_o_num_of_alternations_signs_par, TestAlternations_Random) { + boost::mpi::communicator world; + + const int size = 1000; + std::vector input(size); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dist(-100, 100); + + for (int i = 0; i < size; ++i) { + input[i] = dist(gen); + } + + std::vector seq_output(1); + std::vector par_output(1); + + // Sequential run + std::shared_ptr seq_taskData = std::make_shared(); + seq_taskData->inputs.push_back(reinterpret_cast(input.data())); + seq_taskData->inputs_count.push_back(input.size()); + seq_taskData->outputs.push_back(reinterpret_cast(seq_output.data())); + seq_taskData->outputs_count.push_back(seq_output.size()); + + petrov_o_num_of_alternations_signs_mpi::SequentialTask seq_task(seq_taskData); + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + + // Parallel run + std::shared_ptr par_taskData = std::make_shared(); + if (world.rank() == 0) { + par_taskData->inputs.push_back(reinterpret_cast(input.data())); + par_taskData->inputs_count.push_back(input.size()); + par_taskData->outputs.push_back(reinterpret_cast(par_output.data())); + par_taskData->outputs_count.push_back(par_output.size()); + } + + petrov_o_num_of_alternations_signs_mpi::ParallelTask par_task(par_taskData); + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + ASSERT_EQ(par_output[0], seq_output[0]); + } +} diff --git a/tasks/mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..f981c475068 --- /dev/null +++ b/tasks/mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace petrov_o_num_of_alternations_signs_mpi { + +class SequentialTask : public ppc::core::Task { + public: + explicit SequentialTask(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector 
input_; + int res{}; +}; + +class ParallelTask : public ppc::core::Task { + public: + explicit ParallelTask(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, chunk; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace petrov_o_num_of_alternations_signs_mpi \ No newline at end of file diff --git a/tasks/mpi/petrov_o_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/petrov_o_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..a90dc03f0e3 --- /dev/null +++ b/tasks/mpi/petrov_o_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,66 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp" // Updated include path + +template <typename TaskType> +void runPerformanceTest(int size, int num_running, bool pipeline) { + std::vector<int> in(size); + std::iota(in.begin(), in.end(), 1); + for (size_t i = 0; i < in.size(); ++i) { + if (i % 2 != 0) { + in[i] *= -1; + } + } + std::vector<int> out(1); + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskData = std::make_shared<ppc::core::TaskData>(); + taskData->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskData->outputs_count.emplace_back(out.size()); + + // Create Task + auto task = std::make_shared<TaskType>(taskData); + + // Create Perf attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = num_running; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer and run the measurement + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(task); + if (pipeline) { + perfAnalyzer->pipeline_run(perfAttr, perfResults); + } else { + perfAnalyzer->task_run(perfAttr, perfResults); + } + boost::mpi::communicator world; + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + } +} + +TEST(petrov_o_num_of_alternations_signs_seq, test_pipeline_run) { + boost::mpi::communicator world; + runPerformanceTest<petrov_o_num_of_alternations_signs_mpi::SequentialTask>(100000, 10, true); +} + +TEST(petrov_o_num_of_alternations_signs_seq, test_task_run) { + boost::mpi::communicator world; + runPerformanceTest<petrov_o_num_of_alternations_signs_mpi::SequentialTask>(100000, 10, false); +} + +TEST(petrov_o_num_of_alternations_signs_mpi, test_pipeline_run) { + boost::mpi::communicator world; + runPerformanceTest<petrov_o_num_of_alternations_signs_mpi::ParallelTask>(100000, 10, true); +} + +TEST(petrov_o_num_of_alternations_signs_mpi, test_task_run) { + boost::mpi::communicator world; + runPerformanceTest<petrov_o_num_of_alternations_signs_mpi::ParallelTask>(100000, 10, false); +} \ No newline at end of file diff --git a/tasks/mpi/petrov_o_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/petrov_o_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..3ae725005e9 --- /dev/null +++ b/tasks/mpi/petrov_o_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1,145 @@ +#include "mpi/petrov_o_num_of_alternations_signs/include/ops_mpi.hpp" + +#include +#include + +bool petrov_o_num_of_alternations_signs_mpi::ParallelTask::pre_processing() { + internal_order_test(); + this->res = 0; + return true; +} + +bool petrov_o_num_of_alternations_signs_mpi::ParallelTask::validation() { + internal_order_test(); + + if (world.rank() != 0) return true; + return taskData->outputs_count[0] == 1; +} + +bool petrov_o_num_of_alternations_signs_mpi::ParallelTask::run() { + internal_order_test(); + + int input_size = 0; + + if (world.rank() == 0) { + input_size =
taskData->inputs_count[0]; + } + + int active_processes = std::min((int)world.size(), input_size); // Number of active processes + + boost::mpi::broadcast(world, input_size, 0); + boost::mpi::broadcast(world, active_processes, 0); + + if (input_size < 2) { + this->res = 0; + return true; + } + + // Every rank must take part in the split before any rank leaves, so the collectives below run on a communicator that holds only the active ranks. + boost::mpi::communicator active = world.split(world.rank() < active_processes ? 0 : 1); + + if (world.rank() >= active_processes) { // end work for all unused processes; they skip the collectives on `active` + return true; + } + + if (world.rank() == 0) { + const int* input = reinterpret_cast(taskData->inputs[0]); + this->input_.resize(input_size); + std::copy(input, input + input_size, std::begin(this->input_)); + + std::vector distribution(active_processes); + std::vector displacement(active_processes); + + int chunk_size = input_size / active_processes; + int remainder = input_size % active_processes; + + for (int i = 0; i < active_processes; ++i) { + distribution[i] = chunk_size + static_cast(i < remainder); // Distribute remainder + displacement[i] = (i == 0) ? 0 : displacement[i - 1] + distribution[i - 1]; + } + + chunk.resize(distribution[world.rank()]); + + boost::mpi::scatterv(active, input, distribution, displacement, chunk.data(), distribution[world.rank()], 0); + + } else { + int chunk_size = input_size / active_processes; + int remainder = input_size % active_processes; + + int distribution = chunk_size + static_cast(world.rank() < remainder); + + chunk.resize(distribution); + + int input; // clang-tidy needs unused input + boost::mpi::scatterv(active, &input, chunk.data(), distribution, 0); + } + + auto local_res = 0; + + for (size_t i = 1; i < chunk.size(); i++) { + if ((chunk[i] < 0) ^ (chunk[i - 1] < 0)) { + local_res++; + } + } + + int last_element = chunk.back(); + int next_element = 0; // receives the last element of the left neighbour's chunk + + if (world.rank() < active_processes - 1) { + world.send(world.rank() + 1, 0, last_element); + } + + if (world.rank() > 0) { + world.recv(world.rank() - 1, 0, next_element); + if ((chunk.front() < 0) ^ (next_element < 0)) { + local_res++; + } + } + + boost::mpi::reduce(active, local_res, res, std::plus(), 0); + return true; +} + +bool petrov_o_num_of_alternations_signs_mpi::ParallelTask::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = res; + } + return true; +} + +bool petrov_o_num_of_alternations_signs_mpi::SequentialTask::pre_processing() { + internal_order_test(); + + const auto input_size = taskData->inputs_count[0]; + + const int* input = reinterpret_cast(taskData->inputs[0]); + this->input_.resize(input_size); + std::copy(input, input + input_size, std::begin(this->input_)); + + this->res = 0; + + return true; +} + +bool petrov_o_num_of_alternations_signs_mpi::SequentialTask::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool petrov_o_num_of_alternations_signs_mpi::SequentialTask::run() { + internal_order_test(); + + if (input_.size() > 1) { + for (size_t i = 1; i < input_.size(); i++) { + if ((input_[i] < 0) ^ (input_[i - 1] < 0)) { + this->res++; + } + } + } + + return true; +} + +bool petrov_o_num_of_alternations_signs_mpi::SequentialTask::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} diff --git a/tasks/seq/petrov_o_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/petrov_o_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..cf2c0c054fa --- /dev/null +++ b/tasks/seq/petrov_o_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,134 @@ +#include + +#include +#include + +#include "seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp" + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_Simple) { + std::vector input = {1, -2, 3, -4, 5}; + std::vector output(1); // Vector for the result + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 4); // Expected number of alternations: 4 +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_AllPositive) { + std::vector input = {1, 2, 3, 4, 5}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); // Expected number of alternations: 0 +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_AllNegative) { + std::vector input = {-1, -2, -3, -4, -5}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); // Expected number of alternations: 0 +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_Empty) { + std::vector input = {}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); // Expected number of alternations: 0 +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_OneElement) { + std::vector input = {1}; + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], 0); // Expected number of alternations: 0 +} + +TEST(petrov_o_num_of_alternations_signs_seq, TestAlternations_LargeInput) { + const int size = 1000; + std::vector input(size); + std::iota(input.begin(), input.end(), 1); // Fill with the numbers 1 to 1000 + for (size_t i = 0; i < input.size(); ++i) { + if (i % 2 != 0) { + input[i] *= -1; + } + } + + std::vector output(1); + + std::shared_ptr taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(input.data())); + taskData->inputs_count.push_back(input.size()); + taskData->outputs.push_back(reinterpret_cast(output.data())); + taskData->outputs_count.push_back(output.size()); + + petrov_o_num_of_alternations_signs_seq::TestTaskSequential task(taskData); + + ASSERT_TRUE(task.validation()); + ASSERT_TRUE(task.pre_processing()); + ASSERT_TRUE(task.run()); + ASSERT_TRUE(task.post_processing()); + + ASSERT_EQ(output[0], static_cast(input.size() - 1)); // Expected number of alternations for alternating signs +} diff --git a/tasks/seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp new file mode 100644 index 00000000000..8c21059f970 --- /dev/null +++ b/tasks/seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace petrov_o_num_of_alternations_signs_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + + bool validation() override; + + bool run() override; + + bool post_processing() override; + + private: + std::vector input_{}; + int res{}; +}; + +} // namespace petrov_o_num_of_alternations_signs_seq \ No newline at end of file diff --git a/tasks/seq/petrov_o_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/petrov_o_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..77b8e88198b --- /dev/null +++ b/tasks/seq/petrov_o_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,93 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp" + +TEST(petrov_o_num_of_alternations_signs_seq, test_pipeline_run) { + const int size = 100000; // Large vector size for the performance test + std::vector in(size); + std::iota(in.begin(), in.end(), 1); + for (size_t i = 0; i < in.size(); ++i) { + if (i % 2 != 0) { + in[i] *= -1; + } + } + std::vector out(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], + static_cast(in.size() - + 1)); // Check the expected number of alternations (size - 1 for alternating signs) +} + +TEST(petrov_o_num_of_alternations_signs_seq, test_task_run) { + const int size = 100000; // Large vector size for the performance test + std::vector in(size); + std::iota(in.begin(), in.end(), 1); + for (size_t i = 0; i < in.size(); ++i) { + if (i % 2 != 0) { + in[i] *= -1; + } + } + std::vector out(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], + static_cast(in.size() - + 1)); // Check the expected number of alternations (size - 1 for alternating signs) +} diff --git a/tasks/seq/petrov_o_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/petrov_o_num_of_alternations_signs/src/ops_seq.cpp new file mode 100644 index 00000000000..d5af4872f01 --- /dev/null +++ b/tasks/seq/petrov_o_num_of_alternations_signs/src/ops_seq.cpp @@ -0,0 +1,42 @@ +#include "seq/petrov_o_num_of_alternations_signs/include/ops_seq.hpp" + +using namespace std::chrono_literals; + +bool petrov_o_num_of_alternations_signs_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + const auto input_size = taskData->inputs_count[0]; + + const int* input = reinterpret_cast(taskData->inputs[0]); + this->input_.resize(input_size); + std::copy(input, input + input_size, std::begin(this->input_)); + + this->res = 0; // Reset the counter on every new run + + return true; +} + +bool petrov_o_num_of_alternations_signs_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; // Check that a single number is expected as output +} + +bool petrov_o_num_of_alternations_signs_seq::TestTaskSequential::run() { + internal_order_test(); + + if (input_.size() > 1) { + for (size_t i = 1; i < input_.size(); i++) { + if ((input_[i] < 0) ^ (input_[i - 1] < 0)) { + this->res++; + } + } + } + + return true; +} + +bool petrov_o_num_of_alternations_signs_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; // Write out the result + return true; +} From 820bd9b31716b64d109ea8a4b277ec1f2f566a6e Mon Sep 17 00:00:00 2001 From: ArtemPch <131471743+ArtemPch@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:25:51 +0300 Subject: [PATCH 124/155] =?UTF-8?q?=D0=9F=D0=B5=D1=82=D1=80=D0=BE=D0=B2=20?=
=?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=207.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5=20?= =?UTF-8?q?=D0=B1=D0=BB=D0=B8=D0=B7=D0=BA=D0=B8=D1=85=20=D1=81=D0=BE=D1=81?= =?UTF-8?q?=D0=B5=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5?= =?UTF-8?q?=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80?= =?UTF-8?q?=D0=B0.=20(#132)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential version: In the sequential version of the algorithm we first walk over the array and compare every pair of adjacent elements to determine the minimum distance between them. We initialize variables that hold the minimum distance and the corresponding pair of elements. On each loop iteration we compute the absolute distance between the current element and the next one. If this distance is smaller than the current minimum, we update the minimum distance and remember that pair of elements. After the loop finishes, the found pair is stored in the output data. Parallel version: In the parallel version the array is first split into several subarrays according to the number of available MPI processes. Each process handles its own subarray, running the same algorithm as the sequential version to find the closest neighbors in its part of the array. Once all processes have finished, the results from each process are gathered and their minimum distances are compared to determine the globally closest pair. If one part of the array is smaller than the others, its minimum is still passed to the root process for further processing.
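A minimal sketch of the sequential scan described above, assuming a plain std::vector<int> input of at least two elements (the helper name is illustrative, not part of this patch):

#include <cstddef>
#include <cstdlib>
#include <limits>
#include <utility>
#include <vector>

// Returns the adjacent pair with the smallest absolute difference.
// Assumes v.size() >= 2, mirroring the task's validation check.
std::pair<int, int> closest_adjacent_pair(const std::vector<int>& v) {
  int min_distance = std::numeric_limits<int>::max();
  std::pair<int, int> closest{v[0], v[1]};
  for (std::size_t i = 0; i + 1 < v.size(); ++i) {
    int distance = std::abs(v[i + 1] - v[i]);
    if (distance < min_distance) {
      min_distance = distance;
      closest = {v[i], v[i + 1]};
    }
  }
  return closest;
}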
--- .../func_tests/main.cpp | 245 ++++++++++++++++++ .../include/ops_mpi.hpp | 51 ++++ .../perf_tests/main.cpp | 83 ++++++ .../src/ops_mpi.cpp | 134 ++++++++++ .../func_tests/main.cpp | 101 ++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 80 ++++++ .../src/ops_seq.cpp | 65 +++++ 8 files changed, 783 insertions(+) create mode 100644 tasks/mpi/petrov_a_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/petrov_a_nearest_neighbor_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/petrov_a_nearest_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp create mode 100644 tasks/seq/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/petrov_a_nearest_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/petrov_a_nearest_neighbor_elements/func_tests/main.cpp b/tasks/mpi/petrov_a_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..9d6c85ff8fb --- /dev/null +++ b/tasks/mpi/petrov_a_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,245 @@ +#include + +#include +#include +#include +#include + +#include "mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp" + +namespace petrov_a_nearest_neighbor_elements_mpi { +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} +} // namespace petrov_a_nearest_neighbor_elements_mpi + +TEST(petrov_a_nearest_neighbor_elements_mpi, Test_NearestNeighbor1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = petrov_a_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_neighbors(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_neighbors.data())); + taskDataSeq->outputs_count.emplace_back(reference_neighbors.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(std::abs(reference_neighbors[0] - reference_neighbors[1]), + std::abs(global_neighbors[0] - global_neighbors[1])); + } +} + 
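Note that this test (and the ones that follow) compares the minimal |a - b| distance of the two results rather than the pairs themselves, presumably because the closest pair need not be unique in random data, so the sequential and parallel runs may legitimately return different pairs at the same distance. The parallel task in src/ops_mpi.cpp below combines per-process candidates with a custom Boost.MPI reduction over a pair; a self-contained sketch of that pattern, assuming Boost.MPI (the candidate values here are invented for the illustration):

#include <boost/mpi.hpp>
#include <boost/serialization/utility.hpp>
#include <cstdlib>
#include <utility>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  // Each rank proposes a local candidate pair; the root keeps the pair
  // with the smallest absolute difference.
  std::pair<int, int> local{world.rank(), 2 * world.rank() + 1};
  std::pair<int, int> best;
  boost::mpi::reduce(
      world, local, best,
      [](const std::pair<int, int>& a, const std::pair<int, int>& b) {
        return (std::abs(a.second - a.first) < std::abs(b.second - b.first)) ? a : b;
      },
      0);
  // On rank 0, best now holds the globally closest candidate pair.
  return 0;
}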
+TEST(petrov_a_nearest_neighbor_elements_mpi, Test_NearestNeighbor2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 240; + global_vec = petrov_a_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_neighbors(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_neighbors.data())); + taskDataSeq->outputs_count.emplace_back(reference_neighbors.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(std::abs(reference_neighbors[0] - reference_neighbors[1]), + std::abs(global_neighbors[0] - global_neighbors[1])); + } +} + +TEST(petrov_a_nearest_neighbor_elements_mpi, Test_NearestNeighbor3) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 360; + global_vec = petrov_a_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_neighbors(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_neighbors.data())); + taskDataSeq->outputs_count.emplace_back(reference_neighbors.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(std::abs(reference_neighbors[0] - reference_neighbors[1]), + std::abs(global_neighbors[0] - global_neighbors[1])); + } +} + +TEST(petrov_a_nearest_neighbor_elements_mpi, Test_NearestNeighbor4) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 36; + global_vec = petrov_a_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_neighbors(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_neighbors.data())); + taskDataSeq->outputs_count.emplace_back(reference_neighbors.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(std::abs(reference_neighbors[0] - reference_neighbors[1]), + std::abs(global_neighbors[0] - global_neighbors[1])); + } +} + +TEST(petrov_a_nearest_neighbor_elements_mpi, Test_NearestNeighbor5) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = petrov_a_nearest_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_neighbors(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_neighbors.data())); + taskDataSeq->outputs_count.emplace_back(reference_neighbors.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + 
testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(std::abs(reference_neighbors[0] - reference_neighbors[1]), + std::abs(global_neighbors[0] - global_neighbors[1])); + } +} \ No newline at end of file diff --git a/tasks/mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..3adddec47ec --- /dev/null +++ b/tasks/mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace petrov_a_nearest_neighbor_elements_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + + std::pair closest_pair_; + int min_distance_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + + std::pair closest_pair_; + int min_distance_; + boost::mpi::communicator world; +}; + +} // namespace petrov_a_nearest_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..a601968a0e3 --- /dev/null +++ b/tasks/mpi/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,83 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp" + +TEST(petrov_a_nearest_neighbor_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec(100000, 5); + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(5, global_neighbors[0]); + } +} + 
+TEST(petrov_a_nearest_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec(100000, 5); + std::vector global_neighbors(2, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_neighbors.data())); + taskDataPar->outputs_count.emplace_back(global_neighbors.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(5, global_neighbors[0]); + } +} diff --git a/tasks/mpi/petrov_a_nearest_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/petrov_a_nearest_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..53be7638fa7 --- /dev/null +++ b/tasks/mpi/petrov_a_nearest_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,134 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/petrov_a_nearest_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + min_distance_ = std::numeric_limits::max(); + closest_pair_ = {0, 1}; + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 2; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size() - 1; i++) { + int distance = abs(input_[i + 1] - input_[i]); + if (distance < min_distance_) { + min_distance_ = distance; + closest_pair_ = {input_[i], input_[i + 1]}; + } + } + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = closest_pair_.first; + reinterpret_cast(taskData->outputs[0])[1] = closest_pair_.second; + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + } + + min_distance_ = std::numeric_limits::max(); + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + 
if (taskData->inputs_count[0] < 2) { + return false; + } + return taskData->outputs_count[0] == 2; + } + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + std::pair local_pair = {0, std::numeric_limits::max()}; // sentinel with maximal spread: it loses the reduce below if this rank receives fewer than two elements + + unsigned int delta = 0; + unsigned int dop = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + dop = taskData->inputs_count[0] % world.size(); + } + broadcast(world, delta, 0); + + int local_min_distance = std::numeric_limits::max(); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size() - 1; proc++) { + world.send(proc, 0, input_.data() + proc * delta + dop, delta + 1); + } + if (world.size() != 1) { + world.send(world.size() - 1, 0, input_.data() + dop + (world.size() - 1) * delta, delta); + } + } + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + dop + delta + ((world.size() == 1) ? 0 : 1)); + } else if (world.rank() < world.size() - 1) { + local_input_ = std::vector(delta + 1); + world.recv(0, 0, local_input_.data(), delta + 1); + } else { + local_input_ = std::vector(delta); + world.recv(0, 0, local_input_.data(), delta); + } + + for (size_t i = 0; i + 1 < local_input_.size(); i++) { // i + 1 guard avoids size_t underflow when this rank holds fewer than two elements + int distance = std::abs(local_input_[i + 1] - local_input_[i]); + if (distance < local_min_distance) { + local_min_distance = distance; + local_pair = {local_input_[i], local_input_[i + 1]}; + } + } + + std::pair global_pair; + reduce( + world, local_pair, global_pair, + [](const std::pair& a, const std::pair& b) { + return (std::abs(a.second - a.first) < std::abs(b.second - b.first)) ? a : b; + }, + 0); + + if (world.rank() == 0) { + closest_pair_ = global_pair; + } + + return true; +} + +bool petrov_a_nearest_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = closest_pair_.first; + reinterpret_cast(taskData->outputs[0])[1] = closest_pair_.second; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/petrov_a_nearest_neighbor_elements/func_tests/main.cpp b/tasks/seq/petrov_a_nearest_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..2c9c6f7d8ba --- /dev/null +++ b/tasks/seq/petrov_a_nearest_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,101 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(petrov_a_nearest_neighbor_elements_seq, SUM20) { + // Create data + std::vector in{8, 3}; + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + std::vector T{8, 3}; + ASSERT_EQ(T[0], out[0]); + ASSERT_EQ(T[1], out[1]); +} + +TEST(petrov_a_nearest_neighbor_elements_seq, SUM50) { + // Create data + std::vector in{-10, -5, -3, 2, 7, 12}; + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = 
std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + + testTaskSequential.post_processing(); + + std::vector T{-5, -3}; + ASSERT_EQ(T[0], out[0]); + ASSERT_EQ(T[1], out[1]); +} + +TEST(petrov_a_nearest_neighbor_elements_seq, SUM70) { + // Create data + std::vector in{10, 8, 6, 4, 2, 0}; + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + petrov_a_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + std::vector T{10, 8}; + ASSERT_EQ(T[0], out[0]); + ASSERT_EQ(T[1], out[1]); +} + +TEST(petrov_a_nearest_neighbor_elements_seq, SUM100) { + // Create data + std::vector in{5, 5, 5, 5, 5, 5}; + std::vector out(2, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + petrov_a_nearest_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + std::vector T{5, 5}; + ASSERT_EQ(T[0], out[0]); + ASSERT_EQ(T[1], out[1]); +} \ No newline at end of file diff --git a/tasks/seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp b/tasks/seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..01fdb2f5910 --- /dev/null +++ b/tasks/seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace petrov_a_nearest_neighbor_elements_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; +}; + +} // namespace petrov_a_nearest_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp b/tasks/seq/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..2b0b5e0c7d7 --- /dev/null +++ b/tasks/seq/petrov_a_nearest_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,80 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include 
"seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp" + +TEST(petrov_a_nearest_neighbor_elements_seq, test_pipeline_run) { + // Create data + std::vector in = {1, 3, 7, 5, 6, 10, 12, 14, 15, 2}; + + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 5); + ASSERT_EQ(out[1], 6); +} + +TEST(petrov_a_nearest_neighbor_elements_seq, test_task_run) { + std::vector in = {1, 3, 7, 5, 6, 10, 12, 14, 15, 2}; + + std::vector out(2, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(out[0], 5); + ASSERT_EQ(out[1], 6); +} \ No newline at end of file diff --git a/tasks/seq/petrov_a_nearest_neighbor_elements/src/ops_seq.cpp b/tasks/seq/petrov_a_nearest_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..4423d5e7998 --- /dev/null +++ b/tasks/seq/petrov_a_nearest_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,65 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/petrov_a_nearest_neighbor_elements/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool petrov_a_nearest_neighbor_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int size = taskData->inputs_count[0]; + input_.resize(size); + + int* input_data = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < size; ++i) { + input_[i] = input_data[i]; + } + + res.resize(2); + return true; +} + +bool petrov_a_nearest_neighbor_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + + bool isValid = 
(!taskData->inputs_count.empty()) && (!taskData->inputs.empty()) && (!taskData->outputs.empty()); + + return isValid; +} + +bool petrov_a_nearest_neighbor_elements_seq::TestTaskSequential::run() { + internal_order_test(); + + size_t size = input_.size(); + if (size < 2) { + return false; + } + int min_difference = std::numeric_limits::max(); + size_t min_index = 0; + + for (size_t i = 0; i < size - 1; ++i) { + int difference = std::abs(input_[i] - input_[i + 1]); + if (difference < min_difference) { + min_difference = difference; + min_index = i; + } + } + + res[0] = input_[min_index]; + res[1] = input_[min_index + 1]; + + return true; +} + +bool petrov_a_nearest_neighbor_elements_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + int* output_ = reinterpret_cast(taskData->outputs[0]); + output_[0] = res[0]; + output_[1] = res[1]; + + return true; +} From d38242c305fe92bff84d64dc9474a18334d715d6 Mon Sep 17 00:00:00 2001 From: PolinaSidorina <113776414+PolinaSidorina@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:26:44 +0300 Subject: [PATCH 125/155] =?UTF-8?q?=D0=A1=D0=B8=D0=B4=D0=BE=D1=80=D0=B8?= =?UTF-8?q?=D0=BD=D0=B0=20=D0=9F=D0=BE=D0=BB=D0=B8=D0=BD=D0=B0.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=2026.=20=D0=9F=D1=80=D0=BE=D0=B2=D0=B5=D1=80?= =?UTF-8?q?=D0=BA=D0=B0=20=D0=BB=D0=B5=D0=BA=D1=81=D0=B8=D0=BA=D0=BE=D0=B3?= =?UTF-8?q?=D1=80=D0=B0=D1=84=D0=B8=D1=87=D0=B5=D1=81=D0=BA=D0=BE=D0=B9=20?= =?UTF-8?q?=D1=83=D0=BF=D0=BE=D1=80=D1=8F=D0=B4=D0=BE=D1=87=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D0=BE=D1=81=D1=82=D0=B8=20=D0=B4=D0=B2=D1=83=D1=85=20?= =?UTF-8?q?=D1=81=D1=82=D1=80=D0=BE=D0=BA.=20(#145)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential task: the two strings are compared element by element until the first pair of differing characters is found. Whichever of those characters is greater decides which string is greater as a whole. If the first string is greater, 1 is output; if the second, 0. Parallel task: each of the two strings is split into smaller substrings of equal size; the number of such substrings depends on the number of processes. Each substring pair is sent to its own process, where the elements are compared with the sequential algorithm. A process outputs 0 if the second string is greater, 1 if the first string is greater, and 2 if the substrings are equal. The first process result that is not 2 is recorded as the final answer and the comparison stops.
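For intuition, the chunked comparison described above can be sketched in plain single-process C++. This is only an illustration with hypothetical helper names (compare_chunk, compare_strings), assuming parts >= 1; it is not the code added by this patch:

    #include <algorithm>
    #include <cstddef>
    #include <string>

    // 0 => second chunk greater, 1 => first chunk greater, 2 => chunks equal.
    int compare_chunk(const std::string& a, const std::string& b) {
      for (size_t i = 0; i < std::min(a.size(), b.size()); ++i) {
        if (a[i] > b[i]) return 1;
        if (a[i] < b[i]) return 0;
      }
      return 2;
    }

    // One chunk pair per "process"; the first verdict that is not 2 is final.
    int compare_strings(const std::string& s1, const std::string& s2, size_t parts) {
      size_t delta = std::min(s1.size(), s2.size()) / parts;
      for (size_t p = 0; p < parts; ++p) {
        int verdict = compare_chunk(s1.substr(p * delta, delta), s2.substr(p * delta, delta));
        if (verdict != 2) return verdict;
      }
      // All inspected chunks are equal: as in the MPI task, fall back to lengths.
      if (s1.size() != s2.size()) return (s1.size() > s2.size()) ? 1 : 0;
      return 2;
    }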
--- .../func_tests/main.cpp | 439 ++++++++++++++++++ .../include/ops_mpi.hpp | 44 ++ .../perf_tests/main.cpp | 87 ++++ .../src/ops_mpi.cpp | 137 ++++++ .../func_tests/main.cpp | 104 +++++ .../include/ops_seq.hpp | 22 + .../perf_tests/main.cpp | 75 +++ .../src/ops_seq.cpp | 39 ++ 8 files changed, 947 insertions(+) create mode 100644 tasks/mpi/sidorina_p_check_lexicographic_order/func_tests/main.cpp create mode 100644 tasks/mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp create mode 100644 tasks/mpi/sidorina_p_check_lexicographic_order/perf_tests/main.cpp create mode 100644 tasks/mpi/sidorina_p_check_lexicographic_order/src/ops_mpi.cpp create mode 100644 tasks/seq/sidorina_p_check_lexicographic_order/func_tests/main.cpp create mode 100644 tasks/seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp create mode 100644 tasks/seq/sidorina_p_check_lexicographic_order/perf_tests/main.cpp create mode 100644 tasks/seq/sidorina_p_check_lexicographic_order/src/ops_seq.cpp diff --git a/tasks/mpi/sidorina_p_check_lexicographic_order/func_tests/main.cpp b/tasks/mpi/sidorina_p_check_lexicographic_order/func_tests/main.cpp new file mode 100644 index 00000000000..ed9fbfe895a --- /dev/null +++ b/tasks/mpi/sidorina_p_check_lexicographic_order/func_tests/main.cpp @@ -0,0 +1,439 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp" + +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_1st_element_0) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'u'}, {'z', 'f', 'l', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} + +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_1st_element_1) { + boost::mpi::communicator world; + std::vector> str_ = {{'z', 'f', 'g', 'u'}, {'e', 'f', 'l', 'p'}}; + std::vector res(1, 0); + 
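+  // Verdict encoding used by these tests: 0 => second string greater, 1 => first string greater, 2 => strings equal.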
std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_2nd_element_0) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'u'}, {'e', 'x', 'l', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} 
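+// Each *_0 / *_1 pair of tests swaps which string carries the greater character, so the expected verdict flips between 0 and 1.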
+TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_2nd_element_1) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'x', 'g', 'u'}, {'e', 'f', 'l', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_3d_element_0) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'u'}, {'e', 'f', 'l', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + 
ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_3d_element_1) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'l', 'u'}, {'e', 'f', 'g', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_4_element_0) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'a'}, {'e', 'f', 'g', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_4_element_1) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'z'}, {'e', 'f', 'g', 'p'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_equal_elements) { + boost::mpi::communicator world; + std::vector> str_ = {{'e', 'f', 'g', 'k'}, {'e', 'f', 'g', 'k'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(2, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_3_and_2_equal_elements_0) { + boost::mpi::communicator world; + std::vector> str_ = {{'a', 'b'}, {'a', 'b', 'a'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_difference_3_and_2_equal_elements_1) { + boost::mpi::communicator world; + std::vector> str_ = {{'a', 'b', 'a'}, {'a', 'b'}}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + 
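+  // {'a','b'} vs {'a','b','a'}: all shared positions match, so the longer second string wins and res stays 0.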
testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector ref_res(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataSeq->inputs_count.emplace_back(str_.size()); + taskDataSeq->inputs_count.emplace_back(str_[0].size()); + taskDataSeq->inputs_count.emplace_back(str_[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_res.data())); + taskDataSeq->outputs_count.emplace_back(ref_res.size()); + sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(ref_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp b/tasks/mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp new file mode 100644 index 00000000000..01f924817ab --- /dev/null +++ b/tasks/mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +// Copyright 2024 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sidorina_p_check_lexicographic_order_mpi { +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input1_, local_input2_; + int res{}; + boost::mpi::communicator world; +}; +} // namespace sidorina_p_check_lexicographic_order_mpi \ No newline at end of file diff --git a/tasks/mpi/sidorina_p_check_lexicographic_order/perf_tests/main.cpp b/tasks/mpi/sidorina_p_check_lexicographic_order/perf_tests/main.cpp new file mode 100644 index 00000000000..91416a20d09 --- /dev/null +++ b/tasks/mpi/sidorina_p_check_lexicographic_order/perf_tests/main.cpp @@ -0,0 +1,87 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp" + +TEST(sidorina_p_check_lexicographic_order_mpi, Test_0) { + boost::mpi::communicator world; + std::vector str1(400000, 'e'); + std::vector str2(399999, 'e'); + str2.push_back('f'); + std::vector> str_ = {str1, str2}; + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + 
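+    // The strings agree on the first 399999 characters and differ only at the end ('e' < 'f'), forcing a full scan; expected res == 0.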
taskDataPar->outputs_count.emplace_back(res.size()); + } + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res[0], 0); + } +} +TEST(sidorina_p_check_lexicographic_order_mpi, Test_1) { + boost::mpi::communicator world; + std::vector str1(400000, 'f'); + std::vector str2(399999, 'f'); + str2.push_back('a'); + std::vector> str_ = {str1, str2}; + std::vector res(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[0].data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(str_[1].data())); + taskDataPar->inputs_count.emplace_back(str_.size()); + taskDataPar->inputs_count.emplace_back(str_[0].size()); + taskDataPar->inputs_count.emplace_back(str_[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(res[0], 1); + } +} \ No newline at end of file diff --git a/tasks/mpi/sidorina_p_check_lexicographic_order/src/ops_mpi.cpp b/tasks/mpi/sidorina_p_check_lexicographic_order/src/ops_mpi.cpp new file mode 100644 index 00000000000..39065e71785 --- /dev/null +++ b/tasks/mpi/sidorina_p_check_lexicographic_order/src/ops_mpi.cpp @@ -0,0 +1,137 @@ +// Copyright 2024 Nesterov Alexander +#include "mpi/sidorina_p_check_lexicographic_order/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_.resize(taskData->inputs_count[0]); + for (unsigned int i = 0; i < 2; i++) input_[i].resize(taskData->inputs_count[i + 1]); + for (size_t i = 0; i < taskData->inputs_count[0]; ++i) { + const char* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[i + 1], input_[i].begin()); // row i holds inputs_count[i + 1] characters + } + res = 0; + return true; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < std::min(input_[0].size(), 
input_[1].size()); ++i) { + if (input_[0][i] == input_[1][i]) res = 2; + if (input_[0][i] > input_[1][i]) { + res = 1; + break; + } + if (input_[0][i] < input_[1][i]) { + res = 0; + break; + } + } + if (res == 2 && input_[0].size() != input_[1].size()) { + if (input_[0].size() > input_[1].size()) { + res = 1; + } else { + res = 0; + } + } + return true; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = std::min(taskData->inputs_count[1], taskData->inputs_count[2]) / world.size(); + } + broadcast(world, delta, 0); + if (world.rank() == 0) { + input_.resize(taskData->inputs_count[0]); + for (unsigned int i = 0; i < 2; i++) input_[i].resize(taskData->inputs_count[i + 1]); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[i + 1], input_[i].begin()); // row i holds inputs_count[i + 1] characters + } + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_[0].data() + delta * proc, delta); + world.send(proc, 1, input_[1].data() + delta * proc, delta); + } + } + if (world.rank() == 0) { + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + delta); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + delta); + } else { + local_input1_.resize(delta); + local_input2_.resize(delta); + world.recv(0, 0, local_input1_.data(), delta); + world.recv(0, 1, local_input2_.data(), delta); + } + res = 2; + return true; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int local_res = 2; + for (size_t i = 0; i < local_input1_.size(); i++) { + if (local_input1_[i] > local_input2_[i]) { + local_res = 1; + break; + } + if (local_input1_[i] < local_input2_[i]) { + local_res = 0; + break; + } + } + std::vector full_result; + boost::mpi::gather(world, local_res, full_result, 0); + if (world.rank() == 0) { + for (int result : full_result) { + if (result != 2) { + res = result; + break; + } + } + if (res == 2 && input_[0].size() != input_[1].size()) { + if (input_[0].size() > input_[1].size()) { + res = 1; + } else { + res = 0; + } + } + } + return true; +} +bool sidorina_p_check_lexicographic_order_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/sidorina_p_check_lexicographic_order/func_tests/main.cpp b/tasks/seq/sidorina_p_check_lexicographic_order/func_tests/main.cpp new file mode 100644 index 00000000000..66257dc610f --- /dev/null +++ b/tasks/seq/sidorina_p_check_lexicographic_order/func_tests/main.cpp @@ -0,0 +1,104 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include + +#include "seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp" + +TEST(sidorina_p_check_lexicographic_order_seq, Test_3_elements) { + std::vector> in = {{'e', 'f', 'g'}, {'e', 'k', 'g'}}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = 
std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + sidorina_p_check_lexicographic_order_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} +TEST(sidorina_p_check_lexicographic_order_seq, Test_difference_1st_element_0) { + std::vector> in = {{'a', 'b', 'c'}, {'d', 'b', 'c'}}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + sidorina_p_check_lexicographic_order_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(sidorina_p_check_lexicographic_order_seq, Test_difference_1st_element_1) { + std::vector> in = {{'b', 'c', 'g'}, {'a', 'c', 'g'}}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + sidorina_p_check_lexicographic_order_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(sidorina_p_check_lexicographic_order_seq, Test_difference_2nd_element_1) { + std::vector> in = {{'e', 'c', 'g'}, {'e', 'a', 'g'}}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + sidorina_p_check_lexicographic_order_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} +TEST(sidorina_p_check_lexicographic_order_seq, Test_difference_3d_element_1) { + std::vector> in = {{'a', 'b', 'g'}, {'a', 'b', 'a'}}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
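+  // First difference at index 2: 'g' > 'a', so the first string is greater and out[0] == 1.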
taskDataSeq->inputs.emplace_back(reinterpret_cast(in[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[1].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + sidorina_p_check_lexicographic_order_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp b/tasks/seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp new file mode 100644 index 00000000000..bffed680430 --- /dev/null +++ b/tasks/seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp @@ -0,0 +1,22 @@ +// Copyright 2024 Nesterov Alexander +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace sidorina_p_check_lexicographic_order_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; +} // namespace sidorina_p_check_lexicographic_order_seq \ No newline at end of file diff --git a/tasks/seq/sidorina_p_check_lexicographic_order/perf_tests/main.cpp b/tasks/seq/sidorina_p_check_lexicographic_order/perf_tests/main.cpp new file mode 100644 index 00000000000..0d9d7cc3637 --- /dev/null +++ b/tasks/seq/sidorina_p_check_lexicographic_order/perf_tests/main.cpp @@ -0,0 +1,75 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp" + +TEST(sidorina_p_check_lexicographic_order_seq, Test_0) { + std::vector str1(40000000, 'a'); + std::vector str2(39999999, 'a'); + str2.push_back('b'); + std::vector> input = {str1, str2}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input[1].data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->inputs_count.emplace_back(input[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(sidorina_p_check_lexicographic_order_seq, Test_1) { + std::vector str1(40000000, 'b'); + std::vector str2(39999999, 'b'); + str2.push_back('a'); + std::vector> input = {str1, str2}; + std::vector 
out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input[0].data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input[1].data())); + taskDataSeq->inputs_count.emplace_back(input.size()); + taskDataSeq->inputs_count.emplace_back(input[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/sidorina_p_check_lexicographic_order/src/ops_seq.cpp b/tasks/seq/sidorina_p_check_lexicographic_order/src/ops_seq.cpp new file mode 100644 index 00000000000..f6c23d9349f --- /dev/null +++ b/tasks/seq/sidorina_p_check_lexicographic_order/src/ops_seq.cpp @@ -0,0 +1,39 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/sidorina_p_check_lexicographic_order/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool sidorina_p_check_lexicographic_order_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_.resize(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + for (size_t i = 0; i < taskData->inputs_count[0]; ++i) { + const char* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], input_[i].begin()); + } + res = 0; + return true; +} +bool sidorina_p_check_lexicographic_order_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool sidorina_p_check_lexicographic_order_seq::TestTaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < std::min(input_[0].size(), input_[1].size()); ++i) { + if (input_[0][i] > input_[1][i]) { + res = 1; + break; + } + if (input_[0][i] < input_[1][i]) break; + } + return true; +} +bool sidorina_p_check_lexicographic_order_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From 83ee55739d73afc99ae3fc9f622990942f7611cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=92=D0=B0=D0=BD=D1=8F=20=D0=90=D0=BB=D0=BF=D1=83=D0=BF?= =?UTF-8?q?=D0=BA=D0=B8=D0=BD?= <129745841+Applejack2004@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:27:38 +0300 Subject: [PATCH 126/155] =?UTF-8?q?=D0=90=D0=BB=D0=BF=D1=83=D1=82=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=98=D0=B2=D0=B0=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=208.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5=D0=B5=20?= =?UTF-8?q?=D0=BE=D1=82=D0=BB=D0=B8=D1=87=D0=B0=D1=8E=D1=89=D0=B8=D1=85?= =?UTF-8?q?=D1=81=D1=8F=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8?= 
=?UTF-8?q?=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#147)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential version: iterates over all pairs of neighboring elements of the input vector, computing the absolute difference for each pair and remembering the maximum difference and the corresponding elements. Returns the pair of elements with the maximum difference. Parallel (MPI) version: uses Boost.MPI to distribute the input vector. broadcast distributes the subtask sizes. Each process computes a local maximum difference and exchanges boundary elements (via world.send and world.recv). boost::mpi::reduce with a custom comparison function combines the local results, determining the global maximum difference. --------- Co-authored-by: Ivan Alputov --- .../func_tests/main.cpp | 347 ++++++++++++++++++ .../include/ops_mpi.hpp | 49 +++ .../perf_tests/main.cpp | 86 +++++ .../src/ops_mpi.cpp | 108 ++++++ .../func_tests/main.cpp | 131 +++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 74 ++++ .../src/ops_seq.cpp | 44 +++ 8 files changed, 863 insertions(+) create mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp create mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp create mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp create mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..3815e2ea5f0 --- /dev/null +++ b/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,347 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" + +namespace alputov_i_most_different_neighbor_elements_mpi { +std::vector generator(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::vector ans(sz); + for (int i = 0; i < sz; ++i) { + ans[i] = gen() % 1000; + int x = gen() % 2; + if (x == 0) ans[i] *= -1; + } + + return ans; +} +} // namespace alputov_i_most_different_neighbor_elements_mpi + +TEST(alputov_i_most_different_neighbor_elements_mpi, EmptyInput_ReturnsFalse) { + boost::mpi::communicator world; + std::vector global_vec(1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // Create data + std::vector reference_ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); + taskDataSeq->outputs_count.emplace_back(reference_ans.size()); + + // Create Task + 
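+    // A one-element vector has no neighboring pair, so validation() is expected to fail.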
alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), false); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, InputSizeTwo_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int sz = 2; + global_vec = std::vector(sz, 0); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} +TEST(alputov_i_most_different_neighbor_elements_mpi, LargeRandomInput_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int sz = 1234; + global_vec = alputov_i_most_different_neighbor_elements_mpi::generator(sz); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + 
ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, MediumRandomInput_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int sz = 120; + global_vec = alputov_i_most_different_neighbor_elements_mpi::generator(sz); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, AllEqualElements_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int sz = 100; + global_vec = std::vector(sz, 0); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + 
ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, AlternatingElements_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {1, -1, 1, -1, 1, -1, 1, -1, 1, -1}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, ConstantDifferenceSequence_CorrectResult) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int sz = 123; + global_vec.resize(sz); + for (int i = 0; i < sz; ++i) { + global_vec[i] = sz - i; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + 
most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, MostlyZerosInput_ReturnsCorrectPair) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {12, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq + most_different_neighbor_elements_seq(taskDataSeq); + ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); + most_different_neighbor_elements_seq.pre_processing(); + most_different_neighbor_elements_seq.run(); + most_different_neighbor_elements_seq.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..7a939f660f0 --- /dev/null +++ b/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace alputov_i_most_different_neighbor_elements_mpi { + +class most_different_neighbor_elements_seq : public ppc::core::Task { + public: + explicit most_different_neighbor_elements_seq(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::pair res{}; +}; + +class most_different_neighbor_elements_mpi : public ppc::core::Task { + public: + explicit most_different_neighbor_elements_mpi(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::pair res; + size_t size; + size_t st; + boost::mpi::communicator world; +}; + +} // namespace alputov_i_most_different_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp new file mode 
100644 index 00000000000..bd9ebae8591 --- /dev/null +++ b/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,86 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" + +TEST(alputov_i_most_different_neighbor_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec(20000000, 0); + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = + std::make_shared( + taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(2, global_sum[0]); + } +} + +TEST(alputov_i_most_different_neighbor_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec(20000000, 0); + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = + std::make_shared( + taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..16552d1a870 --- /dev/null +++ b/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp @@ -0,0 +1,108 @@ +#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include + +#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" + +bool 
alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::pre_processing() { + internal_order_test(); + + auto input = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], input.begin()); + + input_ = std::vector>(input.size() - 1); + + for (size_t i = 1; i < input.size(); ++i) { + input_[i - 1] = {std::abs(input[i] - input[i - 1]), std::min(input[i], input[i - 1])}; + } + + res = input_[0]; + + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::run() { + internal_order_test(); + + for (size_t i = 1; i < input_.size(); ++i) { + if (res.first < input_[i].first) res = input_[i]; + } + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res.first; + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::pre_processing() { + internal_order_test(); + + res = {INT_MIN, -1}; + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::run() { + internal_order_test(); + + int delta_size = 0; + if (world.rank() == 0) { + delta_size = (taskData->inputs_count[0]) / world.size(); + size = taskData->inputs_count[0]; + if (taskData->inputs_count[0] % world.size() > 0) delta_size++; + } + broadcast(world, delta_size, 0); + + if (world.rank() == 0) { + input_ = std::vector(world.size() * delta_size + 2, 0); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], input_.begin()); + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta_size, delta_size + 1); + } + } + + local_input_ = std::vector(delta_size + 1); + st = world.rank() * delta_size; + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta_size + 1); + } else { + world.recv(0, 0, local_input_.data(), delta_size + 1); + } + + std::pair local_ans = {INT_MIN, -1}; + for (size_t i = 0; (i + st) < size - 1 && i < (local_input_.size() - 1); ++i) { + std::pair tmp = {abs(local_input_[i + 1] - local_input_[i]), i + st}; + local_ans = std::max(local_ans, tmp); + } + reduce(world, local_ans, res, boost::mpi::maximum>(), 0); + return true; +} + +bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res.first; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp new file mode 100644 index 00000000000..d46f5759699 --- /dev/null +++ b/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,131 @@ +#include 
+ +#include +#include + +#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" + +namespace alputov_i_most_different_neighbor_elements_seq { +std::vector generator(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::vector ans(sz); + for (int i = 0; i < sz; ++i) { + ans[i] = gen() % 1000; + int x = gen() % 2; + if (x == 0) ans[i] *= -1; + } + + return ans; +} +} // namespace alputov_i_most_different_neighbor_elements_seq + +TEST(alputov_i_most_different_neighbor_elements_seq, EmptyInput_ReturnsFalse) { + std::vector in = {}; + std::vector> out(1); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(alputov_i_most_different_neighbor_elements_seq, InputSizeTwo_ReturnsCorrectPair) { + std::vector in = alputov_i_most_different_neighbor_elements_seq::generator(2); + std::vector> out(1); + std::pair ans = {std::min(in[0], in[1]), std::max(in[0], in[1])}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(alputov_i_most_different_neighbor_elements_seq, SequentialInput_ReturnsFirstTwoElements) { + std::vector in = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + std::vector> out(1); + std::pair ans = {1, 2}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(alputov_i_most_different_neighbor_elements_seq, MostlyZerosInput_ReturnsZeroAndLargest) { + std::vector in = {0, 0, 0, 0, 0, 0, 0, 0, 0, 12}; + std::vector> out(1); + std::pair ans = {0, 12}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + 
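For reference, the behavior these sequential tests pin down fits in one standalone routine. A minimal sketch, assuming only the standard library (the function name is illustrative and not part of the patched sources): scan adjacent pairs, keep the largest absolute difference, and report that pair as {min, max}, matching the expected answers built above.

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <utility>
#include <vector>

// Reference scan: return the {min, max} pair of the two adjacent elements
// whose absolute difference is largest; ties keep the earliest such pair.
std::pair<int, int> reference_most_different(const std::vector<int>& v) {
  assert(v.size() > 1);  // mirrors validation(): at least two elements
  std::pair<int, int> best{std::min(v[0], v[1]), std::max(v[0], v[1])};
  int best_diff = std::abs(v[1] - v[0]);
  for (size_t i = 2; i < v.size(); ++i) {
    int diff = std::abs(v[i] - v[i - 1]);
    if (diff > best_diff) {
      best_diff = diff;
      best = {std::min(v[i - 1], v[i]), std::max(v[i - 1], v[i])};
    }
  }
  return best;
}

On {0, 0, ..., 0, 12} this yields {0, 12}, and on an all-zero vector {0, 0}, exactly the answers asserted in the surrounding tests.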
+TEST(alputov_i_most_different_neighbor_elements_seq, AllZerosInput_ReturnsZeroZero) { + std::vector in(100, 0); + std::vector> out(1); + std::pair ans = {0, 0}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(alputov_i_most_different_neighbor_elements_seq, CloseNegativeNumbers_ReturnsCorrectPair) { + std::vector in = {-1, -2, -3, -4, -1000}; + std::vector> out(1); + std::pair ans = {-1000, -4}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp b/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..57e9b7d14c3 --- /dev/null +++ b/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace alputov_i_most_different_neighbor_elements_seq { + +class most_different_neighbor_elements_seq : public ppc::core::Task { + public: + explicit most_different_neighbor_elements_seq(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::pair res{}; +}; + +} // namespace alputov_i_most_different_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..c2ade47b3ed --- /dev/null +++ b/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,74 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" + +TEST(alputov_i_most_different_neighbor_elements_seq, test_pipeline_run) { + std::vector in(20000000, 0); + std::vector> out(1); + + std::pair ans = {0, 0}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared( + taskDataSeq); + + auto perfAttr = std::make_shared(); + 
perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(ans, out[0]); +} + +TEST(alputov_i_most_different_neighbor_elements_seq, test_task_run) { + std::vector in(20000000, 0); + std::vector> out(1); + + std::pair ans = {0, 0}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared( + taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..e6d53206c87 --- /dev/null +++ b/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp @@ -0,0 +1,44 @@ +#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" + +#include +#include + +bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::pre_processing() { + internal_order_test(); + + auto input = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], input.begin()); + + input_ = std::vector>(input.size() - 1); + + for (size_t i = 1; i < input.size(); ++i) { + input_[i - 1] = {std::abs(input[i] - input[i - 1]), std::min(input[i], input[i - 1])}; + } + + res = input_[0]; + + return true; +} + +bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::validation() { + internal_order_test(); + return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; +} + +bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::run() { + internal_order_test(); + + for (size_t i = 1; i < input_.size(); ++i) { + if (res.first < input_[i].first) res = input_[i]; + } + + return true; +} + +bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::post_processing() { + internal_order_test(); + + reinterpret_cast*>(taskData->outputs[0])[0] = {res.second, res.second + res.first}; + return true; +} From 65327bc13d9a9d11470f6728f5c23a2fcd037379 Mon Sep 17 00:00:00 2001 From: belovtut <91803715+belovtut@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:28:31 +0300 Subject: [PATCH 
127/155] Belov Artyom. Task 1. Variant 13. Maximum value of matrix
 elements (#149)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Sequential task description:**
Since the matrix is stored as a one-dimensional array (as the task statement
requires), the algorithm walks the vector sequentially and finds the maximum
element of the matrix (i.e. of the vector) with `std::max_element`.

**Parallel task description (MPI):**
The root process places the matrix dimensions, the input data array, and the
result array into the `TaskData` structure. Data is distributed evenly across
processes: for each process an element count and an offset from the start of
the array are computed. The root process then scatters the data with
`boost::mpi::scatterv()`, while every other process receives its chunk from
the root, also via `scatterv`. Each process finds the local maximum over its
part of the elements. Finally, the root process collects the local maxima
from the other processes with `reduce` and determines the final (global)
maximum with a maximum functor (`boost::mpi::maximum`). (A standalone sketch
of this scheme appears after the first large-matrix test below.)
---
 .../func_tests/main.cpp                       | 631 ++++++++++++++++++
 .../include/ops_mpi.hpp                       | 190 ++++++
 .../perf_tests/main.cpp                       | 127 ++++
 .../src/ops_mpi.cpp                           |   1 +
 .../func_tests/main.cpp                       | 375 +++++++++++
 .../include/ops_seq.hpp                       |  79 +++
 .../perf_tests/main.cpp                       | 104 +++
 .../src/ops_seq.cpp                           |   1 +
 8 files changed, 1508 insertions(+)
 create mode 100644 tasks/mpi/belov_a_max_value_of_matrix_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/belov_a_max_value_of_matrix_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/belov_a_max_value_of_matrix_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/belov_a_max_value_of_matrix_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/belov_a_max_value_of_matrix_elements/func_tests/main.cpp b/tasks/mpi/belov_a_max_value_of_matrix_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..db82701b678
--- /dev/null
+++ b/tasks/mpi/belov_a_max_value_of_matrix_elements/func_tests/main.cpp
@@ -0,0 +1,631 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp"
+
+using namespace belov_a_max_value_of_matrix_elements_mpi;
+
+template <typename T>
+std::vector<T> generate_random_matrix(int rows, int cols, const T& left = T{-1000}, const T& right = T{1000}) {
+  std::vector<T> res(rows * cols);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  for (size_t i = 0; i < res.size(); i++) {
+    res[i] = left + static_cast<T>(gen() % int(right - left + 1));
+  }
+  return res;
+}
+
+TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Equality_Reference_Max_With_Calculated_Max) {
+  boost::mpi::communicator world;
+  std::vector<int> dimensions =
{3, 4}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Large_Matrix_Int) { + boost::mpi::communicator world; + std::vector dimensions = {1000, 1000}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + 
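The scatterv/reduce scheme the commit message describes can also be sketched outside the task framework. A minimal sketch, assuming the same Boost.MPI overloads the patch's ops_mpi.hpp relies on; the data values and variable names here are illustrative:

#include <boost/mpi.hpp>

#include <algorithm>
#include <limits>
#include <vector>

// Sketch: even block distribution with scatterv, local max, then reduce.
int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> matrix;  // flattened rows*cols matrix, filled on root only
  int total = 0;
  if (world.rank() == 0) {
    matrix = {7, -2, 42, 0, 13, -8, 5, 9};
    total = static_cast<int>(matrix.size());
  }
  boost::mpi::broadcast(world, total, 0);

  // Element counts and displacements per process; the first (total % size)
  // ranks each take one extra element.
  std::vector<int> counts(world.size(), total / world.size());
  std::vector<int> displs(world.size(), 0);
  for (int i = 0; i < total % world.size(); ++i) ++counts[i];
  for (int i = 1; i < world.size(); ++i) displs[i] = displs[i - 1] + counts[i - 1];

  std::vector<int> local(counts[world.rank()]);
  if (world.rank() == 0) {
    boost::mpi::scatterv(world, matrix, counts, displs, local.data(), counts[0], 0);
  } else {
    boost::mpi::scatterv(world, local.data(), counts[world.rank()], 0);
  }

  int local_max = local.empty() ? std::numeric_limits<int>::lowest()
                                : *std::max_element(local.begin(), local.end());
  int global_max = 0;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
  return 0;
}

Each rank receives one contiguous block, and only the root observes the final maximum, which is why the functional tests above read the result on rank 0 only.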
+TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Large_Matrix_Double) { + boost::mpi::communicator world; + std::vector dimensions = {1000, 1000}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_NonDivisibleDimensions) { + boost::mpi::communicator world; + std::vector dimensions = {1023, 1027}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_One_Row_Matrix) { + boost::mpi::communicator world; + std::vector dimensions = {1, 1000}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Small_Matrix_Double) { + boost::mpi::communicator world; + std::vector dimensions = {3, 3}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + 
testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_One_Element_Matrix) { + boost::mpi::communicator world; + std::vector dimensions = {1, 1}; + + std::vector global_matrix = {42}; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Large_Range_Values) { + boost::mpi::communicator world; + std::vector dimensions = {100, 100}; + + std::vector global_matrix(dimensions[0] * dimensions[1]); + + std::srand(static_cast(std::time(nullptr))); + for (int& element : global_matrix) { + element = -1000000 + std::rand() % 2000001; + } + + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + 
ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Same_Value_Double) { + boost::mpi::communicator world; + std::vector dimensions = {10, 10}; + + std::vector global_matrix(100, 3.1415); + std::vector global_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Rectangular_Matrix_More_Rows) { + boost::mpi::communicator world; + std::vector dimensions = {50, 5}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + 
taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Max_Element_At_First_Position_Int) { + boost::mpi::communicator world; + std::vector dimensions = {4, 4}; + + std::vector global_matrix = {1000, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Max_Element_At_Last_Position_Double) { + boost::mpi::communicator world; + std::vector dimensions = {3, 3}; + + std::vector global_matrix = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 834.58}; + std::vector global_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + 
belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Small_2x2_Matrix) { + boost::mpi::communicator world; + std::vector dimensions = {2, 2}; + + std::vector global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_max(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Zero_Rows) { + boost::mpi::communicator world; + std::vector dimensions = {0, 10}; + + std::vector global_matrix = {}; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(belov_a_max_value_of_matrix_elements_mpi, Test_Zero_Columns) { + boost::mpi::communicator world; + std::vector dimensions = {10, 0}; + + std::vector global_matrix = {}; + std::vector global_max(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + 
taskDataPar->outputs_count.emplace_back(global_max.size()); + + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..922c5dc482b --- /dev/null +++ b/tasks/mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp @@ -0,0 +1,190 @@ +#ifndef OPS_MPI_HPP +#define OPS_MPI_HPP + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace belov_a_max_value_of_matrix_elements_mpi { + +template +class MaxValueOfMatrixElementsParallel : public ppc::core::Task { + public: + explicit MaxValueOfMatrixElementsParallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + boost::mpi::communicator world; + + int rows_ = 0; + int cols_ = 0; + + T global_max_{}; + T local_max_ = std::numeric_limits::lowest(); + std::vector matrix; + + static T get_max_matrix_element(const std::vector& matrix); +}; + +template +T MaxValueOfMatrixElementsParallel::get_max_matrix_element(const std::vector& matrix) { + return matrix.empty() ? 0 : *std::max_element(matrix.begin(), matrix.end()); +} + +template +bool MaxValueOfMatrixElementsParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* dimensions = reinterpret_cast(taskData->inputs[0]); + rows_ = dimensions[0]; + cols_ = dimensions[1]; + auto* inputMatrixData = reinterpret_cast(taskData->inputs[1]); + matrix.assign(inputMatrixData, inputMatrixData + rows_ * cols_); + } + + return true; +} + +template +bool MaxValueOfMatrixElementsParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return !taskData->inputs.empty() && reinterpret_cast(taskData->inputs[0])[0] > 0 && + reinterpret_cast(taskData->inputs[0])[1] > 0; + } + return true; +} + +template +bool MaxValueOfMatrixElementsParallel::run() { + internal_order_test(); + + int rank = world.rank(); + int size = world.size(); + + int delta; + int remainder; + if (rank == 0) { + delta = rows_ * cols_ / size; + remainder = rows_ * cols_ % size; + } + + boost::mpi::broadcast(world, delta, 0); + boost::mpi::broadcast(world, remainder, 0); + + std::vector distr(size, delta); + std::vector displ(size, 0); + + for (int i = 0; i < remainder; ++distr[i], ++i); + for (int i = 1; i < size; ++i) { + displ[i] = displ[i - 1] + distr[i - 1]; + } + + std::vector local_matrix(distr[rank]); + + if (rank == 0) { + boost::mpi::scatterv(world, matrix, distr, displ, local_matrix.data(), distr[0], 0); + } else { + boost::mpi::scatterv(world, local_matrix.data(), distr[rank], 0); + } + + local_max_ = get_max_matrix_element(local_matrix); + boost::mpi::reduce(world, local_max_, global_max_, boost::mpi::maximum(), 0); + + return true; +} + +template +bool MaxValueOfMatrixElementsParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = global_max_; + } + + return true; +} + +template +class MaxValueOfMatrixElementsSequential : public ppc::core::Task { + public: + explicit MaxValueOfMatrixElementsSequential(std::shared_ptr taskData_) + : 
Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int rows_ = 0; + int cols_ = 0; + T res{}; + std::vector matrix; + + static T get_max_matrix_element(const std::vector& matrix); +}; + +template +T MaxValueOfMatrixElementsSequential::get_max_matrix_element(const std::vector& matrix) { + return *std::max_element(matrix.begin(), matrix.end()); +} + +template +bool MaxValueOfMatrixElementsSequential::pre_processing() { + internal_order_test(); + + auto* dimensions = reinterpret_cast(taskData->inputs[0]); + rows_ = dimensions[0]; + cols_ = dimensions[1]; + + auto* inputMatrixData = reinterpret_cast(taskData->inputs[1]); + matrix.assign(inputMatrixData, inputMatrixData + rows_ * cols_); + + return true; +} + +template +bool MaxValueOfMatrixElementsSequential::validation() { + internal_order_test(); + + return !taskData->inputs.empty() && reinterpret_cast(taskData->inputs[0])[0] > 0 && + reinterpret_cast(taskData->inputs[0])[1] > 0; +} + +template +bool MaxValueOfMatrixElementsSequential::run() { + internal_order_test(); + + res = get_max_matrix_element(matrix); + return true; +} + +template +bool MaxValueOfMatrixElementsSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +} // namespace belov_a_max_value_of_matrix_elements_mpi + +#endif // OPS_MPI_HPP \ No newline at end of file diff --git a/tasks/mpi/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..1ef17959d8f --- /dev/null +++ b/tasks/mpi/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,127 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp" + +template +std::vector generate_random_matrix(int rows, int cols, const T& left = T{-1000}, const T& right = T{1000}) { + std::vector res(rows * cols); + std::random_device dev; + std::mt19937 gen(dev()); + for (size_t i = 0; i < res.size(); i++) { + res[i] = left + static_cast(gen() % int(right - left + 1)); + } + return res; +} + +TEST(belov_a_max_value_matrix_mpi_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector dimensions; + std::vector global_matrix; + std::vector parallel_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + dimensions = std::vector{3, 4}; + global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(parallel_max.data())); + taskDataPar->outputs_count.emplace_back(parallel_max.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + 
perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector sequence_max(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sequence_max.data())); + taskDataSeq->outputs_count.emplace_back(sequence_max.size()); + auto testMpiTaskSequential = + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential(taskDataSeq); + ASSERT_TRUE(testMpiTaskSequential.validation()); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(sequence_max[0], parallel_max[0], 1e-5); + } +} + +TEST(belov_a_max_value_matrix_mpi_perf_test, test_task_run) { + boost::mpi::communicator world; + std::vector dimensions; + std::vector global_matrix; + std::vector parallel_max(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + dimensions = std::vector{3, 4}; + global_matrix = generate_random_matrix(dimensions[0], dimensions[1]); + taskDataPar->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(parallel_max.data())); + taskDataPar->outputs_count.emplace_back(parallel_max.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector sequence_max(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(sequence_max.data())); + taskDataSeq->outputs_count.emplace_back(sequence_max.size()); + auto testMpiTaskSequential = + belov_a_max_value_of_matrix_elements_mpi::MaxValueOfMatrixElementsSequential(taskDataSeq); + ASSERT_TRUE(testMpiTaskSequential.validation()); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(sequence_max[0], parallel_max[0], 1e-5); + } +} diff --git a/tasks/mpi/belov_a_max_value_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/belov_a_max_value_of_matrix_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..66a6322b0c4 --- /dev/null +++ b/tasks/mpi/belov_a_max_value_of_matrix_elements/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "mpi/belov_a_max_value_of_matrix_elements/include/ops_mpi.hpp" \ No newline at end of file diff --git 
a/tasks/seq/belov_a_max_value_of_matrix_elements/func_tests/main.cpp b/tasks/seq/belov_a_max_value_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..d23f9ab143d --- /dev/null +++ b/tasks/seq/belov_a_max_value_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,375 @@ +#include + +#include +#include + +#include "seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp" + +using namespace belov_a_max_value_of_matrix_elements_seq; + +template +std::vector generate_random_matrix(int rows, int cols, const T& left = T{-1000}, const T& right = T{1000}) { + std::vector res(rows * cols); + std::random_device dev; + std::mt19937 gen(dev()); + for (size_t i = 0; i < res.size(); i++) { + res[i] = left + static_cast(gen() % int(right - left + 1)); + } + return res; +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Positive_Integers) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {7, 24, 35, 5, 10, 13}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 35); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Negative_Integers) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {-7, -24, -3, -15, -10, -13}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], -3); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Mixed_Integers) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {-7, 24, -3, 15, 0, -1}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + 
testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 24); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_One_Element) { + const int rows = 1; + const int cols = 1; + + std::vector matrix = {42}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 42); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Positive_Doubles) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {7.2, 24.1, 35.3, 5.5, 10.6, 13.9}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 35.3); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Mixed_Doubles) { + const int rows = 3; + const int cols = 2; + + std::vector matrix = {-10.1, 24.1, -3.5, 15.7, -0.5, -1.2}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 24.1); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Zeroes_Integers) { + const int rows = 3; + const int cols = 3; + + std::vector matrix = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + 
taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 0); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_All_Same_Integers) { + const int rows = 2; + const int cols = 4; + + std::vector matrix = {8, 8, 8, 8, 8, 8, 8, 8}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 8); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Float_Negative) { + const int rows = 2; + const int cols = 2; + + std::vector matrix = {-7.5f, -0.2f, -15.3f, -5.9f}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], -0.2f); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Mixed_Zero_Positive_Negative) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {0, -20, 5, 10, -5, 0}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 10); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Large_Integers) { + const int rows = 2; + const int cols = 3; + + std::vector matrix = {2147483647, -2147483648, 1, 0, 100, 999}; + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 2147483647); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Max_Value_Large_Matrix_Diverse_Double) { + const int rows = 1000; + const int cols = 1000; + + // Creating a matrix with a variety of values: large, small, positive, negative, fractional + std::vector matrix(rows * cols); + for (int i = 0; i < rows * cols; ++i) { + if (i % 5 == 0) { + matrix[i] = static_cast(i * 1.1); + } else if (i % 5 == 1) { + matrix[i] = static_cast(-i * 1.2); + } else if (i % 5 == 2) { + matrix[i] = static_cast(i * 0.001); + } else if (i % 5 == 3) { + matrix[i] = -0.5; + } else { + matrix[i] = 1999999.99; + } + } + + std::vector dimensions = {rows, cols}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + belov_a_max_value_of_matrix_elements_seq::MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(out[0], 1999999.99); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Validation_EmptyData) { + std::shared_ptr taskDataSeq = std::make_shared(); + + MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_PreProcessing_NonPositiveDimensions) { + std::vector dimensions = {0, 5}; + std::vector matrix = {}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + belov_a_max_value_of_matrix_elements_seq::MaxValueOfMatrixElementsSequential testTaskSequential(taskDataSeq); + + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(belov_a_max_value_of_matrix_elements_seq, Test_Random_Matrix_Integers) { + std::vector matrix = generate_random_matrix(10, 10); + bool flag = true; + + for (const auto& item : matrix) { + if (item < -1000 || item > 1000) { + flag = false; + break; + } + } + + ASSERT_TRUE(flag && matrix.size() == 100); +} \ No newline at end of file diff --git a/tasks/seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp b/tasks/seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp new file mode 100644 index 
00000000000..f421be39c1d --- /dev/null +++ b/tasks/seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp @@ -0,0 +1,79 @@ +#ifndef OPS_SEQ_HPP +#define OPS_SEQ_HPP + +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace belov_a_max_value_of_matrix_elements_seq { + +template +class MaxValueOfMatrixElementsSequential : public ppc::core::Task { + public: + explicit MaxValueOfMatrixElementsSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int rows_ = 0; + int cols_ = 0; + T res{}; + std::vector matrix; + + static T get_max_matrix_element(const std::vector& matrix); +}; + +template +T MaxValueOfMatrixElementsSequential::get_max_matrix_element(const std::vector& matrix) { + return *std::max_element(matrix.begin(), matrix.end()); +} + +template +bool MaxValueOfMatrixElementsSequential::pre_processing() { + internal_order_test(); + + auto* dimensions = reinterpret_cast(taskData->inputs[0]); + rows_ = dimensions[0]; + cols_ = dimensions[1]; + + auto inputMatrixData = reinterpret_cast(taskData->inputs[1]); + matrix.assign(inputMatrixData, inputMatrixData + rows_ * cols_); + + return true; +} + +template +bool MaxValueOfMatrixElementsSequential::validation() { + internal_order_test(); + + return !taskData->inputs.empty() && reinterpret_cast(taskData->inputs[0])[0] > 0 && + reinterpret_cast(taskData->inputs[0])[1] > 0; +} + +template +bool MaxValueOfMatrixElementsSequential::run() { + internal_order_test(); + + res = get_max_matrix_element(matrix); + return true; +} + +template +bool MaxValueOfMatrixElementsSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +} // namespace belov_a_max_value_of_matrix_elements_seq + +#endif // OPS_SEQ_HPP \ No newline at end of file diff --git a/tasks/seq/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp b/tasks/seq/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..0697d5f6188 --- /dev/null +++ b/tasks/seq/belov_a_max_value_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp" + +template +std::vector generate_random_matrix(int rows, int cols, const T &left = T{-1000}, const T &right = T{1000}) { + std::vector res(rows * cols); + std::random_device dev; + std::mt19937 gen(dev()); + for (size_t i = 0; i < res.size(); i++) { + res[i] = left + static_cast(gen() % int(right - left + 1)); + } + return res; +} + +TEST(belov_a_max_value_matrix_seq_perf_test, test_pipeline_run) { + const int rows = 600; + const int cols = 950; + + // Create data + std::vector matrix = generate_random_matrix(rows, cols); + + std::vector dimensions = {rows, cols}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(dimensions.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(dimensions.size()); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + 
std::make_shared<belov_a_max_value_of_matrix_elements_seq::MaxValueOfMatrixElementsSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(*std::max_element(matrix.begin(), matrix.end()), out[0]);
+}
+
+TEST(belov_a_max_value_matrix_seq_perf_test, test_task_run) {
+  const int rows = 600;
+  const int cols = 950;
+
+  // Create data
+  std::vector<int> matrix = generate_random_matrix<int>(rows, cols);
+
+  std::vector<int> dimensions = {rows, cols};
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(dimensions.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(dimensions.size());
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<belov_a_max_value_of_matrix_elements_seq::MaxValueOfMatrixElementsSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(*std::max_element(matrix.begin(), matrix.end()), out[0]);
+}

diff --git a/tasks/seq/belov_a_max_value_of_matrix_elements/src/ops_seq.cpp b/tasks/seq/belov_a_max_value_of_matrix_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..389c03ccda6
--- /dev/null
+++ b/tasks/seq/belov_a_max_value_of_matrix_elements/src/ops_seq.cpp
@@ -0,0 +1 @@
+#include "seq/belov_a_max_value_of_matrix_elements/include/ops_seq.hpp"

From ecf198852788f1423fb1322087a66f9ad01120f5 Mon Sep 17 00:00:00 2001
From: suvorovDm-1 <113026535+suvorovDm-1@users.noreply.github.com>
Date: Wed, 6 Nov 2024 21:30:29 +0300
Subject: [PATCH 128/155] Suvorov Dmitry. Task 1. Variant 1. Sum of vector
 elements. (#166)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task:
In the sequential task, the run() function uses std::accumulate to sum the
elements of the vector.
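For readers, a minimal sketch of that core step (not part of the patch;
sum_elements is a hypothetical helper name, while the real code wraps the same
call in the ppc::core::Task interface):

#include <numeric>
#include <vector>

// Fold operator+ over the range, starting from the initial value 0,
// e.g. sum_elements({1, 2, 3}) == 6.
int sum_elements(const std::vector<int>& input) {
  return std::accumulate(input.begin(), input.end(), 0);
}

Note that the initial value's type drives the accumulation: summing into a
wider type would need a 0LL-style initial value.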
The tests cover vectors of different sizes, as well as the sum of the elements
of a zero-filled vector, of an empty vector, and of a vector with a single
element.

Description of the MPI task:
In the MPI task each process creates its own data buffer, into which the root
process scatters chunks of the source vector using scatterv(). The case where
the number of elements is not a multiple of the number of processes is also
handled correctly. On the remaining processes (all except root) the task
reduces to std::accumulate, exactly as in the sequential task. Once processing
is finished, the partial sums are combined into the root process with reduce().
(A commented sketch of the chunk-size computation is included at the top of
the functional-test file below.)

The tests verify:
1. A vector with more elements than there are processes.
2. An empty vector.
3. A zero-filled vector.
4. A vector containing a single element.
5. A vector whose element count equals the number of processes.
6. A vector with fewer elements than there are processes.
7. A vector whose element count is a multiple of the number of processes.
8. A vector whose element count is not a multiple of the number of processes.
---
 .../func_tests/main.cpp | 393 ++++++++++++++++++
 .../include/ops_mpi.hpp |  46 ++
 .../perf_tests/main.cpp |  90 ++++
 .../src/ops_mpi.cpp     | 109 +++++
 .../func_tests/main.cpp | 151 +++++++
 .../include/vec.hpp     |  27 ++
 .../perf_tests/main.cpp |  85 ++++
 .../src/vec.cpp         |  30 ++
 8 files changed, 931 insertions(+)
 create mode 100644 tasks/mpi/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/suvorov_d_sum_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/suvorov_d_sum_of_vector_elements/include/vec.hpp
 create mode 100644 tasks/seq/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/suvorov_d_sum_of_vector_elements/src/vec.cpp

diff --git a/tasks/mpi/suvorov_d_sum_of_vector_elements/func_tests/main.cpp b/tasks/mpi/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..2acd48f3f61
--- /dev/null
+++ b/tasks/mpi/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,393 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp"
+
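+// Editorial sketch, not part of the original patch: the chunk-size logic the
+// tests below exercise, for n elements across p processes. compute_chunk_sizes
+// is a hypothetical name; the real logic lives in
+// Sum_of_vector_elements_parallel::run() in src/ops_mpi.cpp below.
+//
+//   std::vector<int> compute_chunk_sizes(int n, int p) {
+//     std::vector<int> sizes(p, n / p);             // every rank gets the base share
+//     for (int i = 0; i < n % p; ++i) ++sizes[i];   // first n % p ranks take one extra
+//     return sizes;                                 // the sizes always sum to n
+//   }
+//
+// With n = 10 and p = 4 this yields {3, 3, 2, 2}, exactly the layout scatterv()
+// needs together with the prefix-sum displacements.
+
+// To avoid name conflicts with other projects.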
This function is only available in this file +namespace { +std::vector get_random_vector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_int_distribution dist(-1000, 1000); + + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = dist(gen); + } + + return vec; +} +} // namespace + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Normal_Vector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + int count_size_vector = 120; + // The number of processes should be less than the number of elements + if (world.size() >= count_size_vector) { + count_size_vector = 2 * world.size(); + } + global_vec = get_random_vector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Empty_Vector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+    // Create Task
+    suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq);
+    ASSERT_EQ(SumOfVectorElementsSeq.validation(), true);
+    SumOfVectorElementsSeq.pre_processing();
+    SumOfVectorElementsSeq.run();
+    SumOfVectorElementsSeq.post_processing();
+
+    ASSERT_EQ(reference_sum[0], global_sum[0]);
+  }
+}
+
+TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Single_Element) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_sum(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    const int count_size_vector = 1;
+    global_vec = get_random_vector(count_size_vector);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+  // Execution of addition
+  suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar);
+  ASSERT_EQ(SumOfVectorElementsParallel.validation(), true);
+  SumOfVectorElementsParallel.pre_processing();
+  SumOfVectorElementsParallel.run();
+  SumOfVectorElementsParallel.post_processing();
+
+  // Calculating the sum sequentially for verification
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_sum(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+    // Create Task
+    suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq);
+    ASSERT_EQ(SumOfVectorElementsSeq.validation(), true);
+    SumOfVectorElementsSeq.pre_processing();
+    SumOfVectorElementsSeq.run();
+    SumOfVectorElementsSeq.post_processing();
+
+    ASSERT_EQ(reference_sum[0], global_sum[0]);
+  }
+}
+
+TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_When_Process_Count_More_Than_Elements) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_sum(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    // world.size() / 2 guarantees fewer elements than processes (0 when run on one process)
+    const int count_size_vector = world.size() / 2;
+    global_vec = get_random_vector(count_size_vector);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+  // Execution of addition
+  suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar);
+  ASSERT_EQ(SumOfVectorElementsParallel.validation(), true);
+  SumOfVectorElementsParallel.pre_processing();
+  SumOfVectorElementsParallel.run();
+  SumOfVectorElementsParallel.post_processing();
+
+  // Calculating the sum sequentially for verification
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_sum(1, 0);
+
+    // Create TaskData
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_When_Process_Count_Equal_To_Elements) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // The number of processes must be equal to the number of elements + const int count_size_vector = world.size(); + global_vec = get_random_vector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Zero_Vector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // Creating a zero vector + const int count_size_vector = 120; + global_vec = std::vector(count_size_vector, 0); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + 
SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Multiple_Of_Num_Proc_And_Num_Elems) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // The number of elements must be a multiple of the number of processes + const int count_size_vector = 3 * world.size(); + global_vec = get_random_vector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, Test_Sum_With_Not_Multiple_Of_Num_Proc_And_Num_Elems) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + // The number of elements should not be a multiple of the number of processes + // Set prime number + int count_size_vector = 101; + + global_vec = get_random_vector(count_size_vector); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + 
taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + // Execution of addition + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel SumOfVectorElementsParallel(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel.validation(), true); + SumOfVectorElementsParallel.pre_processing(); + SumOfVectorElementsParallel.run(); + SumOfVectorElementsParallel.post_processing(); + + // Calculating the sum sequentially for verification + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..d7e4458d41f --- /dev/null +++ b/tasks/mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace suvorov_d_sum_of_vector_elements_mpi { + +class Sum_of_vector_elements_seq : public ppc::core::Task { + public: + explicit Sum_of_vector_elements_seq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +class Sum_of_vector_elements_parallel : public ppc::core::Task { + public: + explicit Sum_of_vector_elements_parallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world_; +}; + +} // namespace suvorov_d_sum_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..640ff7e8282 --- /dev/null +++ b/tasks/mpi/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp" + +TEST(suvorov_d_sum_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + 
taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto SumOfVectorElementsParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel->validation(), true); + SumOfVectorElementsParallel->pre_processing(); + SumOfVectorElementsParallel->run(); + SumOfVectorElementsParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(SumOfVectorElementsParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} + +TEST(suvorov_d_sum_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 120000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto SumOfVectorElementsParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(SumOfVectorElementsParallel->validation(), true); + SumOfVectorElementsParallel->pre_processing(); + SumOfVectorElementsParallel->run(); + SumOfVectorElementsParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(SumOfVectorElementsParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} diff --git a/tasks/mpi/suvorov_d_sum_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/suvorov_d_sum_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..2013ca24506 --- /dev/null +++ b/tasks/mpi/suvorov_d_sum_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,109 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/suvorov_d_sum_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + return true; +} + +bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] 
== 1;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq::run() {
+  internal_order_test();
+  res_ = std::accumulate(input_.begin(), input_.end(), 0);
+  return true;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_seq::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = res_;
+  return true;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel::pre_processing() {
+  internal_order_test();
+  return true;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel::validation() {
+  internal_order_test();
+  if (world_.rank() == 0) {
+    // Require exactly one input count and a single output slot. The former
+    // inputs_count[0] >= 0 term was dropped: the counts are unsigned, so that
+    // comparison was always true.
+    return taskData->outputs_count[0] == 1 && taskData->inputs_count.size() == 1;
+  }
+  return true;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel::run() {
+  internal_order_test();
+
+  int input_size;
+
+  if (world_.rank() == 0) {
+    input_size = taskData->inputs_count[0];
+    input_ = std::vector<int>(input_size);
+    auto *tmp_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+    std::copy(tmp_ptr, tmp_ptr + input_size, input_.begin());
+  }
+  broadcast(world_, input_size, 0);
+
+  // Base share per process; the first 'rest' ranks take one extra element,
+  // so uneven divisions are handled without losing elements.
+  int rest = input_size % world_.size();
+  std::vector<int> sizes(world_.size(), input_size / world_.size());
+  std::vector<int> displacements(world_.size(), 0);
+  int local_size;
+
+  if (world_.rank() == 0) {
+    for (int i = 0; i < rest; ++i) {
+      sizes[i]++;
+    }
+    // Displacements are the prefix sums of the chunk sizes.
+    for (int i = 1; i < world_.size(); ++i) {
+      displacements[i] = displacements[i - 1] + sizes[i - 1];
+    }
+
+    local_size = sizes[world_.rank()];
+    local_input_.resize(local_size);
+
+    scatterv(world_, input_, sizes, displacements, local_input_.data(), local_size, 0);
+  } else {
+    // Non-root ranks only need to know their own chunk size.
+    if (world_.rank() < rest) {
+      sizes[world_.rank()]++;
+    }
+    local_size = sizes[world_.rank()];
+    local_input_.resize(local_size);
+
+    scatterv(world_, local_input_.data(), local_size, 0);
+  }
+
+  int local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0);
+
+  reduce(world_, local_res, res_, std::plus<int>(), 0);
+
+  return true;
+}
+
+bool suvorov_d_sum_of_vector_elements_mpi::Sum_of_vector_elements_parallel::post_processing() {
+  internal_order_test();
+  if (world_.rank() == 0) {
+    reinterpret_cast<int *>(taskData->outputs[0])[0] = res_;
+  }
+  return true;
+}

diff --git a/tasks/seq/suvorov_d_sum_of_vector_elements/func_tests/main.cpp b/tasks/seq/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..0f29ff1379b
--- /dev/null
+++ b/tasks/seq/suvorov_d_sum_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,151 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <random>
+#include <vector>
+
+#include "seq/suvorov_d_sum_of_vector_elements/include/vec.hpp"
+
+TEST(suvorov_d_sum_of_vector_elements_seq, Test_Sum_1000) {
+  // Create data
+  const size_t vec_size = 1000;
+  std::vector<int> input_test_vector(vec_size);
+  std::vector<int> test_output(1, 0);
+
+  // Initialize the input vector with random integers and compute the expected sum
+  std::random_device rd;
+  std::mt19937 gen(rd());
+  std::uniform_int_distribution<> dis(-100, 100);
+  int right_result = 0;
+  for (size_t i = 0; i < vec_size; ++i) {
+    input_test_vector[i] = dis(gen);
+    right_result += input_test_vector[i];
+  }
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_test_vector.data()));
+  taskDataSeq->inputs_count.emplace_back(input_test_vector.size());
taskDataSeq->outputs.emplace_back(reinterpret_cast(test_output.data())); + taskDataSeq->outputs_count.emplace_back(test_output.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + ASSERT_EQ(right_result, test_output[0]); +} + +TEST(suvorov_d_sum_of_vector_elements_seq, Test_Sum_10000000) { + // Create data + const size_t vec_size = 10000000; + std::vector input_test_vector(vec_size); + std::vector test_output(1, 0); + + // Initialize an input vector with random integers and getting the correct sum result + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(-1000000, 1000000); + int right_result = 0; + for (size_t i = 0; i < vec_size; ++i) { + input_test_vector[i] = dis(gen); + right_result += input_test_vector[i]; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_test_vector.data())); + taskDataSeq->inputs_count.emplace_back(input_test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(test_output.data())); + taskDataSeq->outputs_count.emplace_back(test_output.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + ASSERT_EQ(right_result, test_output[0]); +} + +TEST(suvorov_d_sum_of_vector_elements_seq, Test_Sum_With_Single_Element) { + // Create data + const size_t vec_size = 100; + std::vector input_test_vector(vec_size); + std::vector test_output(1, 0); + + // Initialize an input vector with random integers and getting the correct sum result + int right_result = 0; + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(-1000000, 1000000); + input_test_vector[0] = dis(gen); + right_result = input_test_vector[0]; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_test_vector.data())); + taskDataSeq->inputs_count.emplace_back(input_test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(test_output.data())); + taskDataSeq->outputs_count.emplace_back(test_output.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + ASSERT_EQ(right_result, test_output[0]); +} + +TEST(suvorov_d_sum_of_vector_elements_seq, Test_Sum_With_Zero_Vector) { + // Create data + const size_t vec_size = 100; + std::vector input_test_vector(vec_size, 0); + std::vector test_output(1, 0); + + // Initialize an input vector with random integers and getting the correct sum result + int right_result = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_test_vector.data())); + taskDataSeq->inputs_count.emplace_back(input_test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(test_output.data())); + 
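+  // Every element is zero, so the expected sum stays 0 regardless of vec_size.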
taskDataSeq->outputs_count.emplace_back(test_output.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + ASSERT_EQ(right_result, test_output[0]); +} + +TEST(suvorov_d_sum_of_vector_elements_seq, Test_Sum_With_Empty_Vector) { + // Create data + std::vector input_test_vector; + std::vector test_output(1, 0); + + // Initialize an input vector with random integers and getting the correct sum result + int right_result = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_test_vector.data())); + taskDataSeq->inputs_count.emplace_back(input_test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(test_output.data())); + taskDataSeq->outputs_count.emplace_back(test_output.size()); + + // Create Task + suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq SumOfVectorElementsSeq(taskDataSeq); + ASSERT_EQ(SumOfVectorElementsSeq.validation(), true); + SumOfVectorElementsSeq.pre_processing(); + SumOfVectorElementsSeq.run(); + SumOfVectorElementsSeq.post_processing(); + ASSERT_EQ(right_result, test_output[0]); +} diff --git a/tasks/seq/suvorov_d_sum_of_vector_elements/include/vec.hpp b/tasks/seq/suvorov_d_sum_of_vector_elements/include/vec.hpp new file mode 100644 index 00000000000..253615aa601 --- /dev/null +++ b/tasks/seq/suvorov_d_sum_of_vector_elements/include/vec.hpp @@ -0,0 +1,27 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace suvorov_d_sum_of_vector_elements_seq { + +class Sum_of_vector_elements_seq : public ppc::core::Task { + public: + explicit Sum_of_vector_elements_seq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +} // namespace suvorov_d_sum_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp b/tasks/seq/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..a54ef329926 --- /dev/null +++ b/tasks/seq/suvorov_d_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/suvorov_d_sum_of_vector_elements/include/vec.hpp" + +TEST(suvorov_d_sum_of_vector_elements_seq, test_pipeline_run) { + const int count = 100000000; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto SumOfVectorElementsSeq = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto 
current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(SumOfVectorElementsSeq); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count, out[0]); +} + +TEST(suvorov_d_sum_of_vector_elements_seq, test_task_run) { + const int count = 100000000; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto SumOfVectorElementsSeq = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(SumOfVectorElementsSeq); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count, out[0]); +} diff --git a/tasks/seq/suvorov_d_sum_of_vector_elements/src/vec.cpp b/tasks/seq/suvorov_d_sum_of_vector_elements/src/vec.cpp new file mode 100644 index 00000000000..53eb8402435 --- /dev/null +++ b/tasks/seq/suvorov_d_sum_of_vector_elements/src/vec.cpp @@ -0,0 +1,30 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/suvorov_d_sum_of_vector_elements/include/vec.hpp" + +bool suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq::pre_processing() { + internal_order_test(); + // Init value for input and output + int* input_pointer = reinterpret_cast(taskData->inputs[0]); + input_.assign(input_pointer, input_pointer + taskData->inputs_count[0]); + return true; +} + +bool suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1; +} + +bool suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq::run() { + internal_order_test(); + + res_ = std::accumulate(input_.begin(), input_.end(), 0); + + return true; +} + +bool suvorov_d_sum_of_vector_elements_seq::Sum_of_vector_elements_seq::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From c29537a9a68fc256e9e068c130c616bb5c4ca148 Mon Sep 17 00:00:00 2001 From: Vav1L0N <121250360+Vav1L0N@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:32:30 +0300 Subject: [PATCH 129/155] =?UTF-8?q?=D0=92=D0=B0=D0=B2=D0=B8=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=92=D0=B8=D1=82=D0=B0=D0=BB=D0=B8=D0=B9.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=2018.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4?= 
=?UTF-8?q?=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0?=
 =?UTF-8?q?=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5?=
 =?UTF-8?q?=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE=D0=BB?=
 =?UTF-8?q?=D0=B1=D1=86=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86?=
 =?UTF-8?q?=D1=8B.=20(#176)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The program computes the minimum value in each column of a matrix:

- Sequential implementation: the algorithm finds the column minima one column
at a time. It initializes a variable holding the current minimum for the
column, then iterates over all rows, comparing each value in that column
against the current minimum and updating it when a smaller value is found.
The per-column minima are finally written to the result vector.

- MPI implementation: the algorithm finds the per-column minima of a matrix
that is split across processes. Each process computes the minima over its
local block of rows, after which the root process (rank 0) collects the
partial minima from all processes and updates the global minimum of every
column. The resulting per-column minima are written to the result vector.
---
 .../func_tests/main.cpp                       | 282 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  44 +++
 .../perf_tests/main.cpp                       | 105 +++++++
 .../src/ops_mpi.cpp                           | 164 ++++++++++
 .../func_tests/main.cpp                       | 173 +++++++++++
 .../include/ops_seq.hpp                       |  22 ++
 .../perf_tests/main.cpp                       | 114 +++++++
 .../src/ops_seq.cpp                           |  55 ++++
 8 files changed, 959 insertions(+)
 create mode 100644 tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..155abd1ae3d
--- /dev/null
+++ b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp
@@ -0,0 +1,282 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <climits>
+#include <vector>
+
+#include "mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp"
+
+std::vector<int> generate_rand_vec(int size, int lower_bound, int upper_bound) {
+  std::vector<int> vec(size);
+  for (auto& n : vec) {
+    n = lower_bound + std::rand() % (upper_bound - lower_bound + 1);
+  }
+  return vec;
+}
+
+std::vector<std::vector<int>> generate_rand_matr(int rows, int cols) {
+  std::vector<std::vector<int>> matr(rows, std::vector<int>(cols));
+  for (int i = 0; i < rows; i++) {
+    matr[i] = generate_rand_vec(cols, -1000, 1000);
+  }
+  for (int j = 0; j < cols; j++) {
+    int r_row = std::rand() % rows;
+    matr[r_row][j] = INT_MIN;
+  }
+  return matr;
+}
+
+TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, find_min_elem_in_col_400x500_matr) {
+  boost::mpi::communicator world;
+  const int rows = 400;
+  const int cols = 500;
+
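+  // Illustrative sketch of the per-column scan described in the commit
+  // message (the sequential task's run() in src/ops_seq.cpp does exactly
+  // this); matr/res are placeholder names, not variables from this test:
+  //   for (int j = 0; j < cols; ++j) {
+  //     int mn = matr[0][j];
+  //     for (int i = 1; i < rows; ++i) mn = std::min(mn, matr[i][j]);
+  //     res[j] = mn;
+  //   }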
std::vector> global_matr; + std::vector min_col(cols, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = generate_rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(min_col.data())); + taskDataPar->outputs_count.emplace_back(min_col.size()); + } + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(cols, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < cols; i++) { + ASSERT_EQ(min_col[i], INT_MIN); + } + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, find_min_elem_in_col_3000x3000_matr) { + boost::mpi::communicator world; + const int rows = 3000; + const int cols = 3000; + + std::vector> global_matr; + std::vector min_col(rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = generate_rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(min_col.data())); + taskDataPar->outputs_count.emplace_back(min_col.size()); + } + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < cols; i++) { + ASSERT_EQ(min_col[i], INT_MIN); + } + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, validation_input_empty_10x10_matr) { + boost::mpi::communicator world; + + if (world.rank() == 0) { + const 
int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, validation_output_empty_10x10_matr) { + boost::mpi::communicator world; + + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, validation_find_min_elem_in_col_10x0_matr) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, validation_fails_on_invalid_output_of_size) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(rows - 1, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, validation_empty_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 0; + const int cols = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count = {rows, cols}; + std::vector vec_res(cols, 0); + 
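+    // rows == cols == 0: no input rows are attached and inputs_count holds
+    // zeros, so validation() must reject the task before pre_processing().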
taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, find_min_elem_in_fixed_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 4; + const int cols = 3; + + std::vector> fixed_matr = {{5, 3, 7}, {8, 1, 6}, {4, 9, 2}, {3, 0, 8}}; + + std::vector expected_min = {3, 0, 2}; + std::vector result(cols, INT_MAX); + + std::shared_ptr taskDataSeq = std::make_shared(); + + for (auto& row : fixed_matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(result.data())); + taskDataSeq->outputs_count.emplace_back(result.size()); + + vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < cols; i++) { + ASSERT_EQ(result[i], expected_min[i]) << "Mismatch in column " << i; + } + } +} diff --git a/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..cf4b8872571 --- /dev/null +++ b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace vavilov_v_min_elements_in_columns_of_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + boost::mpi::communicator world; +}; + +} // namespace vavilov_v_min_elements_in_columns_of_matrix_mpi diff --git a/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..9a6d10a6477 --- /dev/null +++ b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp @@ -0,0 +1,105 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp" + +std::vector generate_rand_vec(int size, int lower_bound, int upper_bound) { + std::vector vec(size); + for (auto& n : vec) { + n = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return vec; +} + +std::vector> generate_rand_matr(int rows, int cols) { + std::vector> matr(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + matr[i] = generate_rand_vec(cols, -1000, 1000); + } + for (int j = 0; j < cols; j++) { + int r_row = std::rand() % 
rows; + matr[r_row][j] = INT_MIN; + } + return matr; +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, test_pipeline_run_min) { + boost::mpi::communicator world; + std::vector> global_matr; + std::vector min_col; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_row; + int count_col; + + if (world.rank() == 0) { + count_row = 5000; + count_col = 5000; + global_matr = generate_rand_matr(count_row, count_col); + min_col.resize(count_col, INT_MAX); + + for (auto& row : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_row); + taskDataPar->inputs_count.emplace_back(count_col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(min_col.data())); + taskDataPar->outputs_count.emplace_back(min_col.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < min_col.size(); j++) { + ASSERT_EQ(min_col[j], INT_MIN); + } + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_matr; + std::vector min_col; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_row; + int count_col; + + if (world.rank() == 0) { + count_row = 5000; + count_col = 5000; + global_matr = generate_rand_matr(count_row, count_col); + min_col.resize(count_col, INT_MAX); + + for (auto& row : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataPar->inputs_count.emplace_back(count_row); + taskDataPar->inputs_count.emplace_back(count_col); + + taskDataPar->outputs.emplace_back(reinterpret_cast(min_col.data())); + taskDataPar->outputs_count.emplace_back(min_col.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < min_col.size(); j++) { + ASSERT_EQ(min_col[j], INT_MIN); + } + } +} diff --git a/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/src/ops_mpi.cpp b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..12fe3c0fa1c --- /dev/null +++ b/tasks/mpi/vavilov_v_min_elements_in_columns_of_matrix/src/ops_mpi.cpp @@ -0,0 +1,164 @@ +#include "mpi/vavilov_v_min_elements_in_columns_of_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_row = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_row[j]; + } + } + res_.resize(cols); + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) && + (taskData->outputs_count[0] == 
taskData->inputs_count[1])); +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + res_.resize(input_[0].size()); + + for (size_t i = 0; i < input_[0].size(); i++) { + int min = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) { + if (input_[j][i] < min) { + min = input_[j][i]; + } + } + res_[i] = min; + } + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + int* output_matr = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matr[i] = res_[i]; + } + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) && + (taskData->outputs_count[0] == taskData->inputs_count[1])); + } + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + int rows = 0; + int cols = 0; + int delta_1; + int delta_2; + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + + broadcast(world, rows, 0); + broadcast(world, cols, 0); + + delta_1 = rows / world.size(); + delta_2 = rows % world.size(); + + if (world.rank() == 0) { + input_.resize(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + int* input_matr = reinterpret_cast(taskData->inputs[i]); + input_[i].assign(input_matr, input_matr + cols); + } + + for (int proc = 1; proc < world.size(); proc++) { + int start_row = proc * delta_1 + std::min(proc, delta_2); + int counts = delta_1 + (proc < delta_2 ? 1 : 0); + for (int i = start_row; i < start_row + counts; i++) { + world.send(proc, 0, input_[i].data(), cols); + } + } + } + + int local_rows = delta_1 + (world.rank() < delta_2 ? 
1 : 0); + + local_input_.resize(local_rows, std::vector(cols)); + + if (world.rank() == 0) { + std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin()); + } else { + for (int i = 0; i < local_rows; i++) { + world.recv(0, 0, local_input_[i].data(), cols); + } + } + + res_.resize(cols); + + std::vector tmp_min(local_input_[0].size(), INT_MAX); + for (size_t i = 0; i < local_input_[0].size(); i++) { + for (size_t j = 0; j < local_input_.size(); j++) { + tmp_min[i] = std::min(tmp_min[i], local_input_[j][i]); + } + } + + if (world.rank() == 0) { + std::vector min_s(res_.size(), INT_MAX); + std::copy(tmp_min.begin(), tmp_min.end(), min_s.begin()); + + for (int proc = 1; proc < world.size(); proc++) { + std::vector proc_min(res_.size()); + world.recv(proc, 0, proc_min.data(), res_.size()); + + for (size_t i = 0; i < res_.size(); i++) { + min_s[i] = std::min(min_s[i], proc_min[i]); + } + } + std::copy(min_s.begin(), min_s.end(), res_.begin()); + } else { + world.send(0, 0, tmp_min.data(), tmp_min.size()); + } + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + int* output_matr = reinterpret_cast(taskData->outputs[0]); + std::copy(res_.begin(), res_.end(), output_matr); + } + + return true; +} diff --git a/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..59f91918fad --- /dev/null +++ b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/func_tests/main.cpp @@ -0,0 +1,173 @@ +#include + +#include + +#include "seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp" + +std::vector generate_rand_vec(int size, int lower_bound, int upper_bound) { + std::vector vec(size); + for (auto& n : vec) { + n = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return vec; +} + +std::vector> generate_rand_matr(int rows, int cols) { + std::vector> matr(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + matr[i] = generate_rand_vec(cols, -1000, 1000); + } + for (int j = 0; j < cols; j++) { + int r_row = std::rand() % rows; + matr[r_row][j] = INT_MIN; + } + return matr; +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, find_min_elem_in_col_400x500_matr) { + const int rows = 400; + const int cols = 500; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(vec_res[j], INT_MIN); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, find_min_elem_in_col_3000x3000_matr) { + const int rows = 3000; + const int cols = 3000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + 
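+  // generate_rand_matr plants INT_MIN once in every column, so each column
+  // minimum is INT_MIN by construction; the assertions below rely on that.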
vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(vec_res[j], INT_MIN); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, validation_input_empty_10x10_matr) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, validation_output_empty_10x10_matr) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, validation_find_min_elem_in_col_10x0_matr) { + const int rows = 10; + const int cols = 0; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, validation_fails_on_invalid_output_of_size) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols - 1, 0); + 
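+  // vec_res is deliberately one element short (cols - 1), so outputs_count[0]
+  // != inputs_count[1] and validation() must fail.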
taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} diff --git a/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..0a209462282 --- /dev/null +++ b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace vavilov_v_min_elements_in_columns_of_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace vavilov_v_min_elements_in_columns_of_matrix_seq diff --git a/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..6e189d6268a --- /dev/null +++ b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/perf_tests/main.cpp @@ -0,0 +1,114 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp" + +std::vector generate_rand_vec(int size, int lower_bound, int upper_bound) { + std::vector vec(size); + for (auto& n : vec) { + n = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return vec; +} + +std::vector> generate_rand_matr(int rows, int cols) { + std::vector> matr(rows, std::vector(cols)); + for (int i = 0; i < rows; i++) { + matr[i] = generate_rand_vec(cols, -1000, 1000); + } + for (int j = 0; j < cols; j++) { + int r_row = std::rand() % rows; + matr[r_row][j] = INT_MIN; + } + return matr; +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, test_pipeline_run) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (size_t i = 0; i < cols; i++) { + ASSERT_EQ(vec_res[i], INT_MIN); + } +} + +TEST(vavilov_v_min_elements_in_columns_of_matrix_seq, test_task_run) { + 
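+  // Same 5000x5000 setup as test_pipeline_run above, but measured through
+  // Perf::task_run instead of Perf::pipeline_run.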
const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::vector> matr = generate_rand_matr(rows, cols); + + for (auto& row : matr) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector vec_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(vec_res.data())); + taskDataSeq->outputs_count.emplace_back(vec_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (size_t i = 0; i < rows; i++) { + ASSERT_EQ(vec_res[i], INT_MIN); + } +} diff --git a/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/src/ops_seq.cpp b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..98819a41a86 --- /dev/null +++ b/tasks/seq/vavilov_v_min_elements_in_columns_of_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +#include "seq/vavilov_v_min_elements_in_columns_of_matrix/include/ops_seq.hpp" + +#include + +bool vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_row = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_row[j]; + } + } + res_.resize(cols); + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) && + (taskData->outputs_count[0] == taskData->inputs_count[1])); +} + +bool vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + res_.resize(input_[0].size()); + + for (size_t i = 0; i < input_[0].size(); i++) { + int min = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) { + if (input_[j][i] < min) { + min = input_[j][i]; + } + } + res_[i] = min; + } + return true; +} + +bool vavilov_v_min_elements_in_columns_of_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + int* output_matr = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matr[i] = res_[i]; + } + return true; +} From 47d36f7b01c730c835cb782b0f0bc8510296443b Mon Sep 17 00:00:00 2001 From: KADCHDR <125730153+KADCHDR@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:33:09 +0300 Subject: [PATCH 130/155] =?UTF-8?q?=D0=9A=D0=BE=D0=B2=D0=B0=D0=BB=D1=8C?= =?UTF-8?q?=D1=87=D1=83=D0=BA=20=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD?= =?UTF-8?q?=D0=B4=D1=80.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20?= 
=?UTF-8?q?=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2013.=20=20=D0=9C?=
 =?UTF-8?q?=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D0=BE?=
 =?UTF-8?q?=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B5=20?=
 =?UTF-8?q?=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=BC?=
 =?UTF-8?q?=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#181)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Sequential solution: walk the matrix row by row, comparing each row's
maximum value against the running maximum of the whole matrix.

MPI solution: the matrix is split across the processes, each process finds
its own maximum value in the part of the matrix assigned to it, and the
overall global maximum is then determined.

---------

Co-authored-by: Alexandr Kovalchuk
---
 .../func_tests/main.cpp                       | 379 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  47 +++
 .../perf_tests/main.cpp                       | 118 ++++++
 .../src/ops_mpi.cpp                           | 134 +++++++
 .../func_tests/main.cpp                       | 262 ++++++++++++
 .../include/ops_seq.hpp                       |  31 ++
 .../perf_tests/main.cpp                       | 118 ++++++
 .../src/ops_seq.cpp                           |  55 +++
 8 files changed, 1144 insertions(+)
 create mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..3390a8aa72e
--- /dev/null
+++ b/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,379 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <climits>
+#include <random>
+#include <vector>
+
+#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp"
+
+using namespace kovalchuk_a_max_of_vector_elements;
+
+std::vector<int> getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN);
+std::vector<std::vector<int>> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN);
+
+std::vector<int> getRandomVector(int sz, int min, int max) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = min + gen() % (max - min + 1);
+  }
+  return vec;
+}
+
+std::vector<std::vector<int>> getRandomMatrix(int rows, int columns, int min, int max) {
+  std::vector<std::vector<int>> vec(rows);
+  for (int i = 0; i < rows; i++) {
+    vec[i] = getRandomVector(columns, min, max);
+  }
+  return vec;
+}
+
+TEST(kovalchuk_a_max_of_vector_elements, Test_Max_10_10) {
+  const int count_rows = 10;
+  const int count_columns = 10;
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    std::mt19937 gen(42);
+    global_matrix = getRandomMatrix(count_rows, count_columns);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
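+    // inputs_count carries the matrix shape: [0] = rows, [1] = columns.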
taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_50x20) { + const int count_rows = 50; + const int count_columns = 20; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::mt19937 gen(42); + global_matrix = getRandomMatrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_100_100) { + const int count_rows = 100; + const int count_columns = 100; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar 
= std::make_shared(); + if (world.rank() == 0) { + std::mt19937 gen(42); + global_matrix = getRandomMatrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_1_100) { + const int count_rows = 1; + const int count_columns = 100; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::mt19937 gen(42); + global_matrix = getRandomMatrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + 
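+    // Illustrative sketch of the scan the commit message describes for the
+    // sequential task: a row-major pass keeping a running maximum
+    // (mx is a placeholder name):
+    //   int mx = INT_MIN;
+    //   for (const auto& row : global_matrix)
+    //     for (int v : row) mx = std::max(mx, v);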
ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Empty_Matrix) { + const int count_rows = 0; + const int count_columns = 0; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = getRandomMatrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_4_4) { + const int count_rows = 4; + const int count_columns = 4; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = getRandomMatrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create 
Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Negative_Values) { + const int count_rows = 10; + const int count_columns = 10; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::mt19937 gen(42); + global_matrix = getRandomMatrix(count_rows, count_columns, -100, -1); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} +TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Same_Values) { + const int count_rows = 10; + const int count_columns = 10; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + std::mt19937 gen(42); + global_matrix = getRandomMatrix(count_rows, count_columns, 20, 20); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(count_rows); + taskDataPar->inputs_count.emplace_back(count_columns); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < 
global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..a4c6af014d4 --- /dev/null +++ b/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kovalchuk_a_max_of_vector_elements { + +const int MINIMALGEN = -999; +const int MAXIMUMGEN = 999; + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace kovalchuk_a_max_of_vector_elements \ No newline at end of file diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..8b5f7f7e7d7 --- /dev/null +++ b/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,118 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp" + +using namespace kovalchuk_a_max_of_vector_elements; + +std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); +std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); + +std::vector getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(columns, min, max); + } + return vec; +} + +TEST(kovalchuk_a_max_of_vector_elements, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + int ref = INT_MAX; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) 
{
+    std::random_device dev;
+    std::mt19937 gen(dev());
+    int count_rows = 9999;
+    int count_columns = 9999;
+    global_matrix = getRandomMatrix(count_rows, count_columns);
+    size_t index = gen() % (static_cast<size_t>(count_rows) * count_columns);
+    global_matrix[index / count_columns][index % count_columns] = ref;
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(ref, global_max[0]);
+  }
+}
+
+TEST(kovalchuk_a_max_of_vector_elements, test_task_run) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  int ref = INT_MAX;
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    std::random_device dev;
+    std::mt19937 gen(dev());
+    int count_rows = 3;
+    int count_columns = 3;
+    global_matrix = getRandomMatrix(count_rows, count_columns);
+    int index = gen() % (count_rows * count_columns);
+    global_matrix[index / count_columns][index % count_columns] = ref;
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(ref, global_max[0]);
+  }
+}
diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp
new file mode 100644
index 00000000000..d2e99c35caa
--- /dev/null
+++ b/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp
@@ -0,0 +1,134 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp"
"mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + if (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) { + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], input_[i].begin()); + } + } else { + input_ = std::vector>(); + } + // Init value for output + res_ = INT_MIN; + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::run() { + internal_order_test(); + if (!input_.empty()) { + std::vector local_res(input_.size()); + for (unsigned int i = 0; i < input_.size(); i++) { + if (!input_[i].empty()) { + local_res[i] = *std::max_element(input_[i].begin(), input_[i].end()); + } else { + local_res[i] = INT_MIN; + } + } + res_ = *std::max_element(local_res.begin(), local_res.end()); + } else { + res_ = INT_MIN; + } + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + if (taskData->inputs_count[0] == 0 || taskData->inputs_count[1] == 0) { + delta = 0; + } else { + delta = std::max(1u, taskData->inputs_count[0] * taskData->inputs_count[1] / world.size()); + } + if (taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1) { + delta = 1; + } + } + + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Init vectors + unsigned int rows = taskData->inputs_count[0]; + unsigned int columns = taskData->inputs_count[1]; + if (rows > 0 && columns > 0) { + input_ = std::vector(rows * columns); + for (unsigned int i = 0; i < rows; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < columns; j++) { + input_[i * columns + j] = tmp_ptr[j]; + } + } + if (delta > 0) { + for (int proc = 1; proc < world.size(); proc++) { + std::span buffer(input_.data() + delta * proc, delta); + world.send(proc, 0, buffer.data(), buffer.size()); + } + } + } else { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, nullptr, 0); + } + } + } + + local_input_ = std::vector(delta); + if (world.rank() == 0) { + if (!input_.empty()) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + // Init value for output + res_ = INT_MIN; + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::run() { + internal_order_test(); + int local_res = local_input_.empty() ? 
INT_MIN : *std::max_element(local_input_.begin(), local_input_.end()); + reduce(world, local_res, res_, boost::mpi::maximum(), 0); + return true; +} + +bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..43551c6112b --- /dev/null +++ b/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,262 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" + +using namespace kovalchuk_a_max_of_vector_elements_seq; + +std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); +std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); + +std::vector getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(columns, min, max); + } + return vec; +} + +TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_10_10) { + const int count_rows = 10; + const int count_columns = 10; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + global_matrix = getRandomMatrix(count_rows, count_columns); + std::random_device dev; + std::mt19937 gen(dev()); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index % count_columns] = INT_MAX; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataSeq->outputs_count.emplace_back(global_max.size()); + // Create Task + TestSequentialTask testSequentialTask(taskDataSeq); + ASSERT_EQ(testSequentialTask.validation(), true); + testSequentialTask.pre_processing(); + testSequentialTask.run(); + testSequentialTask.post_processing(); + + ASSERT_EQ(global_max[0], INT_MAX); +} + +TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_50_20) { + const int count_rows = 50; + const int count_columns = 20; + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + global_matrix = getRandomMatrix(count_rows, count_columns); + std::random_device dev; + std::mt19937 gen(dev()); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index % count_columns] = INT_MAX; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); + 
taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  ASSERT_EQ(global_max[0], INT_MAX);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_100_100) {
+  const int count_rows = 100;
+  const int count_columns = 100;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  int index = gen() % (count_rows * count_columns);
+  global_matrix[index / count_columns][index % count_columns] = INT_MAX;
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+  taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  ASSERT_EQ(global_max[0], INT_MAX);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_1_100) {
+  const int count_rows = 1;
+  const int count_columns = 100;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  int index = gen() % (count_rows * count_columns);
+  global_matrix[index / count_columns][index % count_columns] = INT_MAX;
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+  taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  ASSERT_EQ(global_max[0], INT_MAX);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Empty_Matrix) {
+  const int count_rows = 0;
+  const int count_columns = 0;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns);
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+  taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  // An empty matrix has no maximum, so the task reports INT_MIN
+  ASSERT_EQ(global_max[0], INT_MIN);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_4_4) {
+  const int count_rows = 4;
+  const int count_columns = 4;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  int index = gen() % (count_rows * count_columns);
+  global_matrix[index / count_columns][index % count_columns] = INT_MAX;
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+  taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  ASSERT_EQ(global_max[0], INT_MAX);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Negative_Values) {
+  const int count_rows = 1;
+  const int count_columns = 100;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns, -999, -1);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  int index = gen() % (count_rows * count_columns);
+  global_matrix[index / count_columns][index % count_columns] = INT_MAX;
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+  taskDataSeq->outputs_count.emplace_back(global_max.size());
+  // Create Task
+  TestSequentialTask testSequentialTask(taskDataSeq);
+  ASSERT_EQ(testSequentialTask.validation(), true);
+  testSequentialTask.pre_processing();
+  testSequentialTask.run();
+  testSequentialTask.post_processing();
+
+  ASSERT_EQ(global_max[0], INT_MAX);
+}
+
+TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Same_Values) {
+  const int count_rows = 10;
+  const int count_columns = 100;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max(1, INT_MIN);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  global_matrix = getRandomMatrix(count_rows, count_columns, 20, 20);
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  int index = gen() % (count_rows * count_columns);
+  global_matrix[index / count_columns][index % count_columns] = INT_MAX;
+
+  for (unsigned int i = 0; i < global_matrix.size(); i++)
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix[i].data()));
+  taskDataSeq->inputs_count.emplace_back(count_rows);
+  taskDataSeq->inputs_count.emplace_back(count_columns);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
taskDataSeq->outputs_count.emplace_back(global_max.size()); + // Create Task + TestSequentialTask testSequentialTask(taskDataSeq); + ASSERT_EQ(testSequentialTask.validation(), true); + testSequentialTask.pre_processing(); + testSequentialTask.run(); + testSequentialTask.post_processing(); + + ASSERT_EQ(global_max[0], INT_MAX); +} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..ffb8c534b3e --- /dev/null +++ b/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,31 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kovalchuk_a_max_of_vector_elements_seq { + +const int MINIMALGEN = -99; +const int MAXIMUMGEN = 99; + +class TestSequentialTask : public ppc::core::Task { + public: + explicit TestSequentialTask(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace kovalchuk_a_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..0650d9210fe --- /dev/null +++ b/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,118 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" + +using namespace kovalchuk_a_max_of_vector_elements_seq; + +std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); +std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); + +std::vector getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + for (int i = 0; i < rows; i++) { + vec[i] = getRandomVector(columns, min, max); + } + return vec; +} + +TEST(kovalchuk_a_max_of_vector_elements_seq, test_pipeline_run) { + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + int ref = INT_MAX; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + std::random_device dev; + std::mt19937 gen(dev()); + int count_rows = 9999; + int count_columns = 9999; + global_matrix = getRandomMatrix(count_rows, count_columns); + size_t index = gen() % (static_cast(count_rows) * count_columns); + global_matrix[index / count_columns][index % count_columns] = ref; + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataSeq->outputs_count.emplace_back(global_max.size()); + // Create Task + auto testSequentialTask = std::make_shared(taskDataSeq); + ASSERT_EQ(testSequentialTask->validation(), 
true); + testSequentialTask->pre_processing(); + testSequentialTask->run(); + testSequentialTask->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + return std::chrono::duration_cast>(std::chrono::high_resolution_clock::now() - + start_time) + .count(); + }; + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testSequentialTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_max[0]); +} + +TEST(kovalchuk_a_max_of_vector_elements_seq, test_task_run) { + std::vector> global_matrix; + std::vector global_max(1, INT_MIN); + int ref = INT_MAX; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + std::random_device dev; + std::mt19937 gen(dev()); + int count_rows = 3; + int count_columns = 3; + global_matrix = getRandomMatrix(count_rows, count_columns); + size_t index = gen() % (static_cast(count_rows) * count_columns); + global_matrix[index / count_columns][index % count_columns] = ref; + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataSeq->outputs_count.emplace_back(global_max.size()); + // Create Task + auto testSequentialTask = std::make_shared(taskDataSeq); + ASSERT_EQ(testSequentialTask->validation(), true); + testSequentialTask->pre_processing(); + testSequentialTask->run(); + testSequentialTask->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + return std::chrono::duration_cast>(std::chrono::high_resolution_clock::now() - + start_time) + .count(); + }; + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testSequentialTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_max[0]); +} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..d2f8bbcd13a --- /dev/null +++ b/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2023 Nesterov Alexander +#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" + +#include +#include +#include +#include +#include + +bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::pre_processing() { + internal_order_test(); + // Init vectors + if (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) { + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], input_[i].begin()); + } + } 
else {
+    input_ = std::vector<std::vector<int>>();
+  }
+  // Init value for output
+  res_ = INT_MIN;
+  return true;
+}
+
+bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->outputs_count[0] == 1;
+}
+
+bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::run() {
+  internal_order_test();
+  if (!input_.empty()) {
+    std::vector<int> local_res(input_.size());
+    for (unsigned int i = 0; i < input_.size(); i++) {
+      if (!input_[i].empty()) {
+        local_res[i] = *std::max_element(input_[i].begin(), input_[i].end());
+      } else {
+        local_res[i] = INT_MIN;
+      }
+    }
+    res_ = *std::max_element(local_res.begin(), local_res.end());
+  } else {
+    res_ = INT_MIN;
+  }
+  return true;
+}
+
+bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  return true;
+}
\ No newline at end of file

From ee68444a892f5bd81c50026b7e2125da48605386 Mon Sep 17 00:00:00 2001
From: VladimirSdobnov <114135450+VladimirSdobnov@users.noreply.github.com>
Date: Wed, 6 Nov 2024 21:35:46 +0300
Subject: [PATCH 131/155] Sdobnov Vladimir. Task 1. Variant 10. Sum of matrix
 elements. (#194)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

_**Sequential task**_
The matrix is flattened into a vector and the elements of the resulting vector are summed with an iterator.

**_Parallel task_**
The matrix is flattened into a vector. The sizes and offsets of the sub-vectors are computed and the sub-vectors are then distributed to the other processes with the 'scatterv' function. Each process sums the elements of the vector it received with an iterator.
Finally, the per-process results are combined with the 'reduce' function.
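For illustration only, not code from this patch: a minimal, self-contained Boost.MPI sketch of the same flatten / scatterv / reduce pipeline described above. The function name matrix_sum_mpi and its free-function shape are invented for this sketch, and it assumes boost::mpi::environment has already been initialized by the caller.

#include <boost/mpi.hpp>

#include <algorithm>
#include <functional>
#include <numeric>
#include <vector>

// Returns the total sum on rank 0; the value on other ranks is 0.
int matrix_sum_mpi(const boost::mpi::communicator& world, const std::vector<std::vector<int>>& matrix) {
  std::vector<int> flat;
  int total = 0;
  if (world.rank() == 0) {
    // Flatten the matrix into a single contiguous vector (root only).
    for (const auto& row : matrix) flat.insert(flat.end(), row.begin(), row.end());
    total = static_cast<int>(flat.size());
  }
  boost::mpi::broadcast(world, total, 0);

  // Same size/offset scheme as the patch: the first (total % size) ranks
  // take one extra element, so every element is assigned exactly once.
  int base = total / world.size();
  int rem = total % world.size();
  std::vector<int> counts(world.size());
  std::vector<int> displs(world.size());
  for (int i = 0; i < world.size(); i++) {
    counts[i] = base + (i < rem ? 1 : 0);
    displs[i] = i * base + std::min(i, rem);
  }

  // Each rank receives its own chunk of the flattened matrix.
  std::vector<int> local(counts[world.rank()]);
  boost::mpi::scatterv(world, flat.data(), counts, displs, local.data(), counts[world.rank()], 0);

  // Local partial sum, then a global reduction onto rank 0.
  int local_sum = std::accumulate(local.begin(), local.end(), 0);
  int global_sum = 0;
  boost::mpi::reduce(world, local_sum, global_sum, std::plus<int>(), 0);
  return global_sum;
}

With 10 elements on 4 processes the counts come out 3/3/2/2 and the displacements 0/3/6/8, which is exactly the delta-plus-residual arithmetic used in SumVecElemParallel::run below.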
---
 .../func_tests/main.cpp | 304 ++++++++++++++++++
 .../include/ops_mpi.hpp |  42 +++
 .../perf_tests/main.cpp | 111 +++++++
 .../src/ops_mpi.cpp     | 113 +++++++
 .../func_tests/main.cpp | 250 ++++++++++++++
 .../include/ops_seq.hpp |  25 ++
 .../perf_tests/main.cpp | 109 +++++++
 .../src/ops_seq.cpp     |  49 +++
 8 files changed, 1003 insertions(+)
 create mode 100644 tasks/mpi/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/Sdobnov_V_sum_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/Sdobnov_V_sum_of_vector_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..5f66799c659
--- /dev/null
+++ b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,304 @@
+#include 
+
+#include 
+#include 
+#include 
+
+#include "mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp"
+
+std::vector<int> generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50) {
+  std::vector<int> res(size);
+  for (int i = 0; i < size; i++) {
+    res[i] = lower_bound + rand() % (upper_bound - lower_bound + 1);
+  }
+  return res;
+}
+
+std::vector<std::vector<int>> generate_random_matrix(int rows, int columns, int lower_bound = 0, int upper_bound = 50) {
+  std::vector<std::vector<int>> res(rows);
+  for (int i = 0; i < rows; i++) {
+    res[i] = generate_random_vector(columns, lower_bound, upper_bound);
+  }
+  return res;
+}
+
+TEST(Sdobnov_V_sum_of_vector_elements_par, EmptyInput) {
+  boost::mpi::communicator world;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar);
+  if (world.rank() == 0) {
+    ASSERT_FALSE(test.validation());
+  }
+}
+
+TEST(Sdobnov_V_sum_of_vector_elements_par, EmptyOutput) {
+  boost::mpi::communicator world;
+  int rows = 10;
+  int columns = 10;
+  std::vector<std::vector<int>> input = generate_random_matrix(rows, columns);
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->inputs_count.emplace_back(columns);
+    for (long unsigned int i = 0; i < input.size(); i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(input[i].data()));
+    }
+  }
+  Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar);
+  if (world.rank() == 0) {
+    ASSERT_FALSE(test.validation());
+  }
+}
+
+TEST(Sdobnov_V_sum_of_vector_elements_par, EmptyMatrix) {
+  boost::mpi::communicator world;
+  int rows = 0;
+  int columns = 0;
+  int res = 0;
+  std::vector<std::vector<int>> input = generate_random_matrix(rows, columns);
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->inputs_count.emplace_back(columns);
+    for (long unsigned int i = 0; i < input.size(); i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(input[i].data()));
+    }
+    taskDataPar->outputs_count.emplace_back(1);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(&res));
+  }
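+  // With a 0x0 matrix every scatterv count is zero: each rank receives an
+  // empty chunk and the reduced sum stays 0.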
+ + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix1x1) { + boost::mpi::communicator world; + + int rows = 1; + int columns = 1; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix5x1) { + boost::mpi::communicator world; + + int rows = 5; + int columns = 1; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix10x10) { + boost::mpi::communicator world; + + int rows = 10; + int columns = 10; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + 
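+    // 'respar' saved the parallel result before the sequential task reran and
+    // overwrote 'res' through the same taskDataPar, so the assertion below
+    // compares the parallel and sequential answers directly.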
ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix100x100) { + boost::mpi::communicator world; + + int rows = 100; + int columns = 100; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix100x10) { + boost::mpi::communicator world; + + int rows = 100; + int columns = 10; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, Matrix10x100) { + boost::mpi::communicator world; + + int rows = 10; + int columns = 100; + int res = 0; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemParallel test(taskDataPar); + test.validation(); + test.pre_processing(); + test.run(); + test.post_processing(); + + if (world.rank() == 0) { + int respar = res; + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential testseq(taskDataPar); + testseq.validation(); + testseq.pre_processing(); + testseq.run(); + testseq.post_processing(); + ASSERT_EQ(respar, res); + } +} diff --git a/tasks/mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..107d291122a --- /dev/null +++ b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,42 @@ +// Copyright 2024 Sdobnov Vladimir +#pragma once 
+#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace Sdobnov_V_sum_of_vector_elements { + +int vec_elem_sum(const std::vector& vec); + +class SumVecElemSequential : public ppc::core::Task { + public: + explicit SumVecElemSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +class SumVecElemParallel : public ppc::core::Task { + public: + explicit SumVecElemParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace Sdobnov_V_sum_of_vector_elements \ No newline at end of file diff --git a/tasks/mpi/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..93856a4eb82 --- /dev/null +++ b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,111 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp" + +std::vector generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50) { + std::vector res(size); + for (int i = 0; i < size; i++) { + res[i] = lower_bound + rand() % (upper_bound - lower_bound + 1); + } + return res; +} + +std::vector> generate_random_matrix(int rows, int columns, int lower_bound = 0, int upper_bound = 50) { + std::vector> res(rows); + for (int i = 0; i < rows; i++) { + res[i] = generate_random_vector(columns, lower_bound, upper_bound); + } + return res; + return std::vector>(); +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, test_pipeline_run) { + boost::mpi::communicator world; + int rows = 10000; + int columns = 10000; + int res; + std::vector> input = generate_random_matrix(rows, columns, 1, 1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + auto test = std::make_shared(taskDataPar); + + test->validation(); + test->pre_processing(); + test->run(); + test->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(test); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(rows * columns, res); + } +} + +TEST(Sdobnov_V_sum_of_vector_elements_par, test_task_run) { + boost::mpi::communicator world; + int rows = 10000; + int columns = 10000; + int res; + std::vector> input = generate_random_matrix(rows, columns, 1, 1); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { 
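+    // Only rank 0 owns the TaskData buffers; the other ranks receive their
+    // chunks of the flattened matrix via scatterv inside run().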
+ taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + } + auto test = std::make_shared(taskDataPar); + + test->validation(); + test->pre_processing(); + test->run(); + test->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(test); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(rows * columns, res); + } +} \ No newline at end of file diff --git a/tasks/mpi/Sdobnov_V_sum_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..4e168bbd8d3 --- /dev/null +++ b/tasks/mpi/Sdobnov_V_sum_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,113 @@ +// Copyright 2024 Sdobnov Vladimir +#include "mpi/Sdobnov_V_sum_of_vector_elements/include/ops_mpi.hpp" + +#include +#include + +int Sdobnov_V_sum_of_vector_elements::vec_elem_sum(const std::vector& vec) { + int res = 0; + for (int elem : vec) { + res += elem; + } + return res; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int columns = taskData->inputs_count[1]; + + input_ = std::vector(rows * columns); + + for (int i = 0; i < rows; i++) { + auto* p = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < columns; j++) { + input_[i * columns + j] = p[j]; + } + } + + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::validation() { + internal_order_test(); + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count[0] == 1); +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::run() { + internal_order_test(); + res_ = vec_elem_sum(input_); + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + int rows = taskData->inputs_count[0]; + int columns = taskData->inputs_count[1]; + + input_ = std::vector(rows * columns); + + for (int i = 0; i < rows; i++) { + auto* p = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < columns; j++) { + input_[i * columns + j] = p[j]; + } + } + } + + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemParallel::validation() { + internal_order_test(); + if (world.rank() == 0) + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count.size() == 1 && taskData->outputs_count[0] == 1); + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemParallel::run() { + internal_order_test(); + + int input_size = 0; + int local_rank = world.rank(); + int 
world_size = world.size(); + if (local_rank == 0) input_size = input_.size(); + boost::mpi::broadcast(world, input_size, 0); + + int elem_per_procces = input_size / world_size; + int residual_elements = input_size % world_size; + + int process_count = elem_per_procces + (local_rank < residual_elements ? 1 : 0); + + std::vector counts(world_size); + std::vector displacment(world_size); + + for (int i = 0; i < world_size; i++) { + counts[i] = elem_per_procces + (i < residual_elements ? 1 : 0); + displacment[i] = i * elem_per_procces + std::min(i, residual_elements); + } + + local_input_.resize(counts[local_rank]); + boost::mpi::scatterv(world, input_.data(), counts, displacment, local_input_.data(), process_count, 0); + + int process_sum = vec_elem_sum(local_input_); + boost::mpi::reduce(world, process_sum, res_, std::plus(), 0); + + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} \ No newline at end of file diff --git a/tasks/seq/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp b/tasks/seq/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..4b991fece3e --- /dev/null +++ b/tasks/seq/Sdobnov_V_sum_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,250 @@ +// Copyright 2024 Sdobnov Vladimir + +#include + +#include + +#include "seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp" + +std::vector generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50) { + std::vector res(size); + for (int i = 0; i < size; i++) { + res[i] = lower_bound + rand() % (upper_bound - lower_bound + 1); + } + return res; +} + +std::vector> generate_random_matrix(int rows, int columns, int lower_bound = 0, int upper_bound = 50) { + std::vector> res(rows); + for (int i = 0; i < rows; i++) { + res[i] = generate_random_vector(columns, lower_bound, upper_bound); + } + return res; + return std::vector>(); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, EmptyInput) { + std::shared_ptr taskDataPar = std::make_shared(); + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + ASSERT_FALSE(test.validation()); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, EmptyOutput) { + int rows = 10; + int columns = 10; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + ASSERT_FALSE(test.validation()); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, EmptyMatrix) { + int rows = 0; + int columns = 0; + int res; + std::vector> input = generate_random_matrix(rows, columns); + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + 
test.post_processing(); + ASSERT_EQ(0, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix1x1) { + int rows = 1; + int columns = 1; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix5x1) { + int rows = 5; + int columns = 1; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix10x10) { + int rows = 10; + int columns = 10; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix100x100) { + int rows = 100; + int columns = 100; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} + 
+TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix100x10) { + int rows = 100; + int columns = 10; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, Matrix10x100) { + int rows = 10; + int columns = 100; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + Sdobnov_V_sum_of_vector_elements::SumVecElemSequential test(taskDataPar); + + ASSERT_TRUE(test.validation()); + test.pre_processing(); + test.run(); + test.post_processing(); + ASSERT_EQ(sum, res); +} \ No newline at end of file diff --git a/tasks/seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp b/tasks/seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..8e1d28426ca --- /dev/null +++ b/tasks/seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2024 Sdobnov Vladimir +#pragma once +#include + +#include + +#include "core/task/include/task.hpp" + +namespace Sdobnov_V_sum_of_vector_elements { + +int vec_elem_sum(const std::vector& vec); + +class SumVecElemSequential : public ppc::core::Task { + public: + explicit SumVecElemSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; +} // namespace Sdobnov_V_sum_of_vector_elements \ No newline at end of file diff --git a/tasks/seq/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp b/tasks/seq/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..b2772375a7f --- /dev/null +++ b/tasks/seq/Sdobnov_V_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,109 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp" + +std::vector generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50) { + std::vector res(size); + for (int i = 0; i < size; i++) { + res[i] = lower_bound + rand() % (upper_bound - lower_bound + 1); + } + return res; +} + +std::vector> generate_random_matrix(int rows, int columns, int lower_bound = 0, int upper_bound = 50) { + std::vector> res(rows); + for (int i = 0; i < rows; i++) { + res[i] = 
generate_random_vector(columns, lower_bound, upper_bound); + } + return res; +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, test_pipeline_run) { + int rows = 10000; + int columns = 10000; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + // Create Task + auto test = std::make_shared(taskDataPar); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(test); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(sum, res); +} + +TEST(Sdobnov_V_sum_of_vector_elements_seq, test_task_run) { + int rows = 10000; + int columns = 10000; + int res; + std::vector> input = generate_random_matrix(rows, columns); + int sum = 0; + for (const std::vector &vec : input) { + for (int elem : vec) { + sum += elem; + } + } + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(columns); + for (long unsigned int i = 0; i < input.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input[i].data())); + } + taskDataPar->outputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&res)); + + // Create Task + auto test = std::make_shared(taskDataPar); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + // Create and init perf results + auto perfResults = std::make_shared(); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(test); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(sum, res); +} \ No newline at end of file diff --git a/tasks/seq/Sdobnov_V_sum_of_vector_elements/src/ops_seq.cpp b/tasks/seq/Sdobnov_V_sum_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..680da33e4ee --- /dev/null +++ b/tasks/seq/Sdobnov_V_sum_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,49 @@ +// Copyright 2024 Sdobnov Vladimir +#include "seq/Sdobnov_V_sum_of_vector_elements/include/ops_seq.hpp" + +#include + +#include + +int Sdobnov_V_sum_of_vector_elements::vec_elem_sum(const std::vector& vec) { + int res = 0; + for (int elem : vec) { + res += elem; + } + return res; +} + +bool
Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int columns = taskData->inputs_count[1]; + + input_ = std::vector(rows * columns); + + for (int i = 0; i < rows; i++) { + auto* p = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < columns; j++) { + input_[i * columns + j] = p[j]; + } + } + + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::validation() { + internal_order_test(); + return (taskData->inputs_count.size() == 2 && taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && + taskData->outputs_count.size() == 1 && taskData->outputs_count[0] == 1); +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::run() { + internal_order_test(); + res_ = vec_elem_sum(input_); + return true; +} + +bool Sdobnov_V_sum_of_vector_elements::SumVecElemSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} From 7c9206c9fa855356fd17e4a91b1b2df77498ca34 Mon Sep 17 00:00:00 2001 From: ovVrLFg8ks <122876910+ovVrLFg8ks@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:36:56 +0300 Subject: [PATCH 132/155] =?UTF-8?q?=D0=9E=D1=82=D1=83=D1=80=D0=B8=D0=BD=20?= =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2015.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6?= =?UTF-8?q?=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BC=D0=B0=D0=BA=D1=81=D0=B8?= =?UTF-8?q?=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD=D1=8B=D1=85=20=D0=B7=D0=BD=D0=B0?= =?UTF-8?q?=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8?= =?UTF-8?q?=D1=86=D1=8B.=20(#188)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **_This is a rework of #75, which was reverted by #183_** **Sequential task description:** For each of the m rows, the maximum is computed and written into a vector of maxima of size m. **MPI task description:** Every process receives the width and height of the matrix (n and m respectively); the rank-0 process also receives the matrix as a one-dimensional vector and a vector for the results. Then, on the rank-0 process: based on the number of non-zero-rank processes, a loop runs in which each process that is about to receive a row of size n is first sent a signal, and then the row itself. Later in the same loop, process 0 collects the result from each of those processes. If the number of processes exceeds the number of rows remaining in a loop iteration, the surplus processes are sent nothing for the time being. After the loop, every process is sent a shutdown signal. On the non-zero-rank processes: a loop runs that first checks for the exit signal; if the signal is not an exit, the process receives a row, computes its maximum, and sends it back to process 0.
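The exchange described above is easier to follow as a stripped-down program. The sketch below is hypothetical and is not the patch's code: it keeps only the signal/row/result protocol, assumes Boost.MPI, and needs at least two ranks to run.

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <algorithm>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;
  if (world.size() < 2) return 1;  // the protocol needs at least one satellite

  const int TAG_EXIT = 1;
  const int TAG_TOBASE = 2;
  const int TAG_TOSAT = 3;
  const int n = 4;  // row length
  const int m = 6;  // row count

  if (world.rank() == 0) {  // base
    std::vector<int> mat(n * m);
    for (int i = 0; i < n * m; i++) mat[i] = i;  // row r's maximum is (r + 1) * n - 1
    std::vector<int> maxes(m);
    const int sats = world.size() - 1;
    int go = 1;
    int stop = 0;
    for (int row = 0; row < m; row += sats) {
      const int batch = std::min(sats, m - row);
      for (int i = 0; i < batch; i++) {
        world.send(i + 1, TAG_EXIT, &go, 1);                   // "a row follows"
        world.send(i + 1, TAG_TOSAT, &mat[(row + i) * n], n);  // the row itself
      }
      for (int i = 0; i < batch; i++) {
        world.recv(i + 1, TAG_TOBASE, &maxes[row + i], 1);     // collect the maxima
      }
    }
    for (int i = 0; i < sats; i++) world.send(i + 1, TAG_EXIT, &stop, 1);  // shut down
    for (int v : maxes) std::cout << v << ' ';
    std::cout << '\n';
  } else {  // satellite
    std::vector<int> row(n);
    int flag = 0;
    while (true) {
      world.recv(0, TAG_EXIT, &flag, 1);
      if (flag == 0) break;  // shutdown signal
      world.recv(0, TAG_TOSAT, row.data(), n);
      int mx = *std::max_element(row.begin(), row.end());
      world.send(0, TAG_TOBASE, &mx, 1);
    }
  }
  return 0;
}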
--- .../func_tests/main.cpp | 326 ++++++++++++++++++ .../include/ops_mpi.hpp | 58 ++++ .../perf_tests/main.cpp | 93 +++++ .../src/ops_mpi.cpp | 151 ++++++++ .../func_tests/main.cpp | 122 +++++++ .../include/ops_seq.hpp | 25 ++ .../perf_tests/main.cpp | 84 +++++ .../src/ops_seq.cpp | 37 ++ 8 files changed, 896 insertions(+) create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..f04d61b8035 --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,326 @@ +#include + +#include +#include +#include +#include + +#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" + +std::vector oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +// squarelike +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_1) { + size_t n = 5; + size_t m = 5; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +// rectangular +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_2) 
{ + size_t n = 10; + size_t m = 15; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_3) { + size_t n = 15; + size_t m = 10; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + 
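All of these functional tests follow one pattern: run the parallel task, recompute the row maxima with the sequential task, and compare the two results element by element. Distilled into a plain helper (hypothetical code, not from the patch), the reference computation that the comparison relies on is:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// One maximum per row of a row-major m-by-n matrix stored as a flat vector.
std::vector<int> row_maxima(const std::vector<int>& flat, std::size_t n, std::size_t m) {
  std::vector<int> res(m);
  for (std::size_t i = 0; i < m; i++) {
    res[i] = *std::max_element(flat.begin() + i * n, flat.begin() + (i + 1) * n);
  }
  return res;
}

int main() {
  // row 0: {1, 5, 3} -> 5; row 1: {9, 2, 4} -> 9
  assert((row_maxima({1, 5, 3, 9, 2, 4}, 3, 2) == std::vector<int>{5, 9}));
  return 0;
}

The i * n and (i + 1) * n offsets assume row-major storage, which matches the ASCII diagram in ops_mpi.hpp later in this patch.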
+TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_4) { + size_t n = 1; + size_t m = 15; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_5) { + size_t n = 15; + size_t m = 1; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + 
ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_functest, Test_Max_EMPTY) { + size_t n = 0; + size_t m = 0; + + boost::mpi::communicator world; + + std::vector global_mat; + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + global_mat = oturin_a_max_values_by_rows_matrix_mpi::getRandomVector(n * m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (size_t i = 0; i < global_max.size(); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..a4c5cbf561d --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,58 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace oturin_a_max_values_by_rows_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + size_t n = 0; + size_t m = 0; + std::vector input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + /* + m maxes: + ^ + | -9 99 : 99 + | 12 06 : 12 + +------> n + */ + size_t n = 0; + size_t m = 0; + std::vector input_, local_input_; + std::vector res; + + boost::mpi::communicator world; +}; + +} // namespace oturin_a_max_values_by_rows_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 
00000000000..15bc85e683a --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,93 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" + +TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_pipeline_run) { + size_t n = 300; + size_t m = 300; + + boost::mpi::communicator world; + + std::vector global_mat(n * m, 1); + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, global_max[0]); + } +} + +TEST(oturin_a_max_values_by_rows_matrix_mpi_perftest, test_task_run) { + size_t n = 300; + size_t m = 300; + + boost::mpi::communicator world; + + std::vector global_mat(n * m, 1); + std::vector global_max(m, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_mat.data())); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, global_max[0]); + } +} diff --git a/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..2e8f40c19d4 --- /dev/null +++ b/tasks/mpi/oturin_a_max_values_by_rows_matrix/src/ops_mpi.cpp @@ -0,0 +1,151 @@ +#include "mpi/oturin_a_max_values_by_rows_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include + +bool
oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(tmp_ptr, tmp_ptr + n * m); + // Init values for output + res = std::vector(m, 0); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check elements count in i/o + // m & maxes: + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} +//////////////////////////////////////////////////////////////////////////////////////// + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[1] == taskData->outputs_count[0]; + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + + if (world.rank() == 0) { + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(tmp_ptr, tmp_ptr + n * m); + // Init values for output + res = std::vector(m, 0); + } + + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + const int TAG_EXIT = 1; + const int TAG_TOBASE = 2; + const int TAG_TOSAT = 3; + +#if defined(_MSC_VER) && !defined(__clang__) + if (world.size() == 1) { + for (size_t i = 0; i < m; i++) { + res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + } + return true; + } +#endif + + if (world.rank() == 0) { // base + size_t satellites = world.size() - 1; + + int proc_exit = 0; + int proc_wait = 1; + + if (m == 0 || n == 0) { + for (size_t i = 0; i < satellites; i++) { + world.send(i + 1, TAG_EXIT, &proc_exit, 1); + } + return true; + } + + int *arr = new int[m * n]; + int *maxes = new int[m]; + + std::copy(input_.begin(), input_.end(), arr); + + size_t row = 0; + while (row < m) { + for (size_t i = 0; i < std::min(satellites, m - row); i++) { + world.send(i + 1, TAG_EXIT, &proc_wait, 1); + world.send(i + 1, TAG_TOSAT, &arr[(row + i) * n], n); + } + + for (size_t i = 0; i < std::min(satellites, m - row); i++) { + world.recv(i + 1, TAG_TOBASE, &maxes[row + i], 1); + } + row += satellites; + } + for (size_t i = 0; i < satellites; i++) // close all satellite processes + world.send(i + 1, TAG_EXIT, &proc_exit, 1); + + res.assign(maxes, maxes + m); + + delete[] arr; + delete[] maxes; + } else { // satelleite + int *arr = new int[n]; + int proc_exit; + while (true) { + int out = INT_MIN; + world.recv(0, TAG_EXIT, &proc_exit, 1); + if (proc_exit == 0) break; + + world.recv(0, TAG_TOSAT, arr, n); + + for (size_t i = 0; i < n; i++) out = std::max(arr[i], out); + + world.send(0, TAG_TOBASE, &out, 1); + } + 
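+      // The loop above exits only when the base sends proc_exit == 0 on
+      // TAG_EXIT, and the base does that exactly once per satellite after the
+      // row loop, so the receive buffer can be freed with no message in flight.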
delete[] arr; + } + return true; +} + +bool oturin_a_max_values_by_rows_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..886af56a69d --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/func_tests/main.cpp @@ -0,0 +1,122 @@ +#include + +#include +#include + +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_5) { + size_t n = 5; + size_t m = 5; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_10_5) { + size_t n = 10; + size_t m = 5; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_5_10) { + size_t n = 5; + size_t m = 10; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + 
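+  // std::iota filled the matrix with 1..n*m row by row, so row i ends with
+  // (i + 1) * n, its largest element, which is exactly what maxes[i] holds.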
ASSERT_EQ(maxes, out); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_functest, Test_Max_EMPTY) { + size_t n = 0; + size_t m = 0; + + // Create data + std::vector in(n * m); + std::vector out(m, 0); + std::vector maxes(m); + + std::iota(std::begin(in), std::end(in), 1); + for (size_t i = 0; i < m; i++) maxes[i] = (i + 1) * n; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(maxes, out); +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..ed042671b44 --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace oturin_a_max_values_by_rows_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + size_t n = 0; + size_t m = 0; + std::vector input_; + std::vector res; +}; + +} // namespace oturin_a_max_values_by_rows_matrix_seq diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..84af1763195 --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,84 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_pipeline_run) { + size_t n = 500; + size_t m = 500; + + // Create data + std::vector in(n * m, 0); + std::vector out(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + 
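+  // The pipeline perf run only sanity-checks the output count recorded in
+  // TaskData; correctness of the row maxima is covered by the functional tests.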
ASSERT_EQ(m, taskDataSeq->outputs_count.back()); +} + +TEST(oturin_a_max_values_by_rows_matrix_seq_perftest, test_task_run) { + size_t n = 500; + size_t m = 500; + + // Create data + std::vector in(n * m, 0); + std::vector out(m, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(m, taskDataSeq->outputs_count.back()); +} diff --git a/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp b/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..e853592808b --- /dev/null +++ b/tasks/seq/oturin_a_max_values_by_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,37 @@ +#include "seq/oturin_a_max_values_by_rows_matrix/include/ops_seq.hpp" + +#include + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + n = (size_t)(taskData->inputs_count[0]); + m = (size_t)(taskData->inputs_count[1]); + input_ = std::vector(n * m); + int *tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_ = std::vector(tmp_ptr, tmp_ptr + n * m); + // Init values for output + res = std::vector(m, 0); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check elements count in i/o + // m & maxes: + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < m; i++) res[i] = *std::max_element(input_.begin() + i * n, input_.begin() + (i + 1) * n); + return true; +} + +bool oturin_a_max_values_by_rows_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (size_t i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} From 5d4ed84247ae93847041f8c36b712639e7ee7264 Mon Sep 17 00:00:00 2001 From: Seraphim Volochaev <116020688+Svoloch2940194@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:37:55 +0300 Subject: [PATCH 133/155] =?UTF-8?q?fix=20Revert=20"=D0=92=D0=BE=D0=BB?= =?UTF-8?q?=D0=BE=D1=87=D0=B0=D0=B5=D0=B2=20=D0=A1=D0=B5=D1=80=D0=B0=D1=84?= =?UTF-8?q?=D0=B8=D0=BC=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20?= =?UTF-8?q?=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2027:=20=D0=9F=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=87=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0?= =?UTF-8?q?=20=D0=BD=D0=B5=D1=81=D0=BE=D0=B2=D0=BF=D0=B0=D0=B4=D0=B0=D1=8E?= 
=?UTF-8?q?=D1=89=D0=B8=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82=D1=80=D0=BE?= =?UTF-8?q?=D0=BA"=20(#232)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sequential variant: Input: 2 strings (possibly of different lengths). If the lengths differ, the answer starts out as the difference of the two lengths, otherwise as 0. We walk the strings linearly and compare position by position: if the characters at the same index differ, we add 2 to the answer, otherwise we move on, up to the minimum of the two lengths. pre_processing: data acquisition. We take the 2 strings and look at their lengths; if they differ, the longer one is trimmed to the length of the shorter, and the length difference is placed into the answer. validation: the inputs and outputs are checked by count (there must be 2 inputs and 1 output). run: we walk both strings and compare characters by index; on a mismatch we add 2, otherwise we move on. post_processing: the result is written out. (For example, for "ab" and "abcd" the answer is 2 from the length difference alone; if the second string were "accd" instead, the one mismatching position would add 2 more, giving 4.) Parallel variant: the same idea, but the strings are split across the processes. If the minimum string length is size, then size is divided by world.size() to get how many characters each process must handle; the split is: process 1: [0, x], process 2: [x+1, 2x], process 3: [2x+1, 3x], ..., process n: [(n-1)x+1, nx], ... About the fix: there was a bug in a test. The program returned 0 (which is the correct answer), while the ground_truth held 2. --------- Co-authored-by: Svolota --- .../func_tests/main.cpp | 229 ++++++++++++++++++ .../include/ops_mpi.hpp | 44 ++++ .../perf_tests/main.cpp | 91 +++++++ .../src/ops_mpi.cpp | 126 ++++++++++ .../func_tests/main.cpp | 184 ++++++++++++++ .../include/ops_seq.hpp | 23 ++ .../perf_tests/main.cpp | 81 +++++++ .../src/ops_seq.cpp | 50 ++++ 8 files changed, 828 insertions(+) create mode 100644 tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp create mode 100644 tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp create mode 100644 tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp diff --git a/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp new file mode 100644 index 00000000000..c9d9adcd7e9 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/func_tests/main.cpp @@ -0,0 +1,229 @@ +#include + +#include +#include +#include +#include + +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +namespace volochaev_s_count_characters_27_mpi { + +std::string get_random_string(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::string vec(sz, ' '); + for (int i = 0; i < sz; i++) { + vec[i] += gen() % 256; + } + return vec; +} + +} // namespace volochaev_s_count_characters_27_mpi + +TEST(volochaev_s_count_characters_27_MPI, Test_0) { + boost::mpi::communicator world; + std::vector global_vec(1, volochaev_s_count_characters_27_mpi::get_random_string(20)); + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + //
Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str1 = 240; + const int size_str2 = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str1), + volochaev_s_count_characters_27_mpi::get_random_string(size_str2)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_diff(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int string_sz = 120; + std::string s = volochaev_s_count_characters_27_mpi::get_random_string(string_sz); + global_vec = {s, s}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); + taskDataPar->outputs_count.emplace_back(global_diff.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_diff(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); + 
taskDataSeq->outputs_count.emplace_back(reference_diff.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_diff[0], global_diff[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_3) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str1 = 240; + const int size_str2 = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str2), + volochaev_s_count_characters_27_mpi::get_random_string(size_str1)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(volochaev_s_count_characters_27_MPI, Test_4) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int size_str = 120; + global_vec = {volochaev_s_count_characters_27_mpi::get_random_string(size_str), + volochaev_s_count_characters_27_mpi::get_random_string(size_str)}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + volochaev_s_count_characters_27_mpi::Lab1_27_mpi testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + 
volochaev_s_count_characters_27_mpi::Lab1_27_seq testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp b/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp new file mode 100644 index 00000000000..830bf2a2768 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace volochaev_s_count_characters_27_mpi { + +class Lab1_27_seq : public ppc::core::Task { + public: + explicit Lab1_27_seq(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class Lab1_27_mpi : public ppc::core::Task { + public: + explicit Lab1_27_mpi(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_, local_input_; + int res{}; + int del{}; + boost::mpi::communicator world; +}; + +} // namespace volochaev_s_count_characters_27_mpi diff --git a/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp new file mode 100644 index 00000000000..62cd551a730 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +TEST(volochaev_s_count_characters_27_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_string; + if (world.rank() == 0) { + count_size_string = 200000000; + std::string s(count_size_string, ' '); + global_vec = std::vector(2, s); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} + +TEST(volochaev_s_count_characters_27_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector 
global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int string_size; + if (world.rank() == 0) { + string_size = 200000000; + std::string s(string_size, ' '); + global_vec = std::vector(2, s); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, global_sum[0]); + } +} diff --git a/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp b/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp new file mode 100644 index 00000000000..90d1a84e6a7 --- /dev/null +++ b/tasks/mpi/volochaev_s_count_characters_27/src/ops_mpi.cpp @@ -0,0 +1,126 @@ +#include "mpi/volochaev_s_count_characters_27/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::pre_processing() { + internal_order_test(); + // Init vectors + auto tmp1 = reinterpret_cast(taskData->inputs[0])[0]; + auto tmp2 = reinterpret_cast(taskData->inputs[0])[1]; + + input_ = std::vector>(std::min(tmp1.size(), tmp2.size())); + + for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); i++) { + input_[i].first = tmp1[i]; + input_[i].second = tmp2[i]; + } + + // Init value for output + res = abs(static_cast(tmp1.size()) - static_cast(tmp2.size())); + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::run() { + internal_order_test(); + for (auto [x, y] : input_) { + if (x != y) { + res += 2; + } + } + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_seq::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = res; + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::pre_processing() { + internal_order_test(); + + // Init value for output + res = 0; + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::validation() { + internal_order_test(); + + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::run() { + internal_order_test(); + + std::string tmp1; + std::string tmp2; + int delta = 0; + if (world.rank() == 0) { + tmp1 = reinterpret_cast(taskData->inputs[0])[0]; + tmp2 = 
reinterpret_cast(taskData->inputs[0])[1]; + + del = abs(static_cast(tmp1.size()) - static_cast(tmp2.size())); + + delta = static_cast(std::min(tmp1.size(), tmp2.size())) / world.size(); + if (taskData->inputs_count[0] % world.size() > 0u) ++delta; + } + + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Init vectors + input_ = std::vector>(world.size() * delta); + + for (size_t i = 0; i < std::min(tmp1.size(), tmp2.size()); ++i) { + input_[i].first = tmp1[i]; + input_[i].second = tmp2[i]; + } + + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + } + + local_input_ = std::vector>(delta); + if (world.rank() == 0) { + local_input_ = std::vector>(input_.begin(), input_.begin() + delta); + } else { + world.recv(0, 0, local_input_.data(), delta); + } + + int res1 = 0; + for (auto [x, y] : local_input_) { + if (x != y) { + res1 += 2; + } + } + reduce(world, res1, res, std::plus(), 0); + return true; +} + +bool volochaev_s_count_characters_27_mpi::Lab1_27_mpi::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = res + del; + } + return true; +} diff --git a/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp new file mode 100644 index 00000000000..7c1154192eb --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/func_tests/main.cpp @@ -0,0 +1,184 @@ +#include + +#include +#include + +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +namespace volochaev_s_count_characters_27_seq { + +std::string get_random_string(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + + std::string vec(sz, ' '); + for (int i = 0; i < sz; i++) { + vec[i] += gen() % 256; + } + return vec; +} +} // namespace volochaev_s_count_characters_27_seq + +TEST(volochaev_s_count_characters_27_seq, Test_0) { + // Create data + std::vector in = {volochaev_s_count_characters_27_seq::get_random_string(20)}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(volochaev_s_count_characters_27_seq, Test_1) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); + std::vector in(2, s); + std::vector out(1, 0); + + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_2) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(20); + std::string s1 = s; + + s1.back() = 
static_cast((static_cast(s1.back()) + 1) % 256); + + std::vector in = {s, s1}; + std::vector out(1, 0); + int ans = 2; + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_3) { + // Create data + + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::string s1 = s.substr(0, 2); + + std::vector in = {s, s1}; + std::vector out(1, 0); + int ans = 4; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_4) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::string s1 = s.substr(0, 2); + + std::vector in = {s1, s}; + std::vector out(1, 0); + int ans = 4; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_5) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(6); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, Test_6) { + // Create data + std::string s = volochaev_s_count_characters_27_seq::get_random_string(7); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
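+  // The vector of two strings travels as a single pointer (in.data());
+  // validation() checks exactly this shape: inputs_count[0] == 2 strings
+  // and outputs_count[0] == 1 int for the resulting distance.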
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + volochaev_s_count_characters_27_seq::Lab1_27 testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp b/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp new file mode 100644 index 00000000000..b8376c7ba92 --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/include/ops_seq.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace volochaev_s_count_characters_27_seq { + +class Lab1_27 : public ppc::core::Task { + public: + explicit Lab1_27(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int sz1, sz2; + int res{}; +}; + +} // namespace volochaev_s_count_characters_27_seq diff --git a/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp b/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp new file mode 100644 index 00000000000..25c7995c367 --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +TEST(volochaev_s_count_characters_27_seq, test_pipeline_run) { + // Create data + std::string s(20000000, ' '); + std::vector in(2, s); + std::vector out(1, 0); + + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} + +TEST(volochaev_s_count_characters_27_seq, test_task_run) { + // Create data + std::string s(20000000, ' '); + std::vector in(2, s); + std::vector out(1, 0); + int ans = 0; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto 
testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ans, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp b/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp new file mode 100644 index 00000000000..fa28835ca5b --- /dev/null +++ b/tasks/seq/volochaev_s_count_characters_27/src/ops_seq.cpp @@ -0,0 +1,50 @@ +#include "seq/volochaev_s_count_characters_27/include/ops_seq.hpp" + +#include +#include +#include + +bool volochaev_s_count_characters_27_seq::Lab1_27::pre_processing() { + internal_order_test(); + // Init value for input and output + std::string input1_ = reinterpret_cast(taskData->inputs[0])[0]; + std::string input2_ = reinterpret_cast(taskData->inputs[0])[1]; + + input_ = std::vector>(std::min(input1_.size(), input2_.size())); + + for (size_t i = 0; i < std::min(input1_.size(), input2_.size()); ++i) { + input_[i].first = input1_[i]; + input_[i].second = input2_[i]; + } + + sz1 = input1_.size(); + sz2 = input2_.size(); + res = 0; + return true; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::run() { + internal_order_test(); + + res = abs(sz1 - sz2); + + for (auto [x, y] : input_) { + if (x != y) { + res += 2; + } + } + + return true; +} + +bool volochaev_s_count_characters_27_seq::Lab1_27::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = res; + return true; +} From af6e319c5047942618b14c268706124dbddeaaba Mon Sep 17 00:00:00 2001 From: Sedova-Olga <113029948+Sedova-Olga@users.noreply.github.com> Date: Thu, 7 Nov 2024 02:51:19 +0300 Subject: [PATCH 134/155] =?UTF-8?q?=D0=A1=D0=B5=D0=B4=D0=BE=D0=B2=D0=B0=20?= =?UTF-8?q?=D0=9E=D0=BB=D1=8C=D0=B3=D0=B0.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=2013.=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C?= =?UTF-8?q?=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B.=20(#112)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 460 ++++++++++++++++++ .../include/ops_mpi.hpp | 38 ++ .../perf_tests/main.cpp | 134 +++++ .../src/ops_mpi.cpp | 110 +++++ .../func_tests/main.cpp | 109 +++++ .../include/ops_seq.hpp | 23 + .../perf_tests/main.cpp | 156 ++++++ .../src/ops_seq.cpp | 44 ++ 8 files changed, 1074 insertions(+) create mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp create 
mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..b6518018d35 --- /dev/null +++ b/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,460 @@ +#include + +#include +#include +#include +#include + +#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp" + +namespace sedova_o_max_of_vector_elements_mpi_test { + +std::vector generate_random_vector(size_t size, size_t value) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = random() % (value + 1); + } + return vec; +} + +std::vector> generate_random_matrix(size_t rows, size_t cols, size_t value) { + std::vector> matrix(rows); + for (size_t i = 0; i < rows; i++) { + matrix[i] = generate_random_vector(cols, value); + } + return matrix; +} +} // namespace sedova_o_max_of_vector_elements_mpi_test + +TEST(sedova_o_max_of_vector_elements_mpi, Test1) { + ASSERT_NO_THROW(sedova_o_max_of_vector_elements_mpi_test::generate_random_vector(10, 10)); +} +TEST(sedova_o_max_of_vector_elements_mpi, Test2) { + ASSERT_NO_THROW(sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(10, 10, 10)); +} +TEST(sedova_o_max_of_vector_elements_mpi, Test3) { + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -30); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(1, 1, 20); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_1_5) { + size_t rows = 1; + size_t cols = 5; + size_t value = 20; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -(int)value); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + testMpiTaskParallel.validation(); + EXPECT_EQ(testMpiTaskParallel.pre_processing(), true); + } +} + 
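+// The tests below all follow the same scheme: run TestMPITaskParallel on every
+// rank, then let rank 0 replay the identical inputs through TestMPITaskSequential
+// and compare the two maxima. A minimal sketch of the reference computation they
+// effectively rely on (names here are illustrative only):
+//   int expected = global_matrix[0][0];
+//   for (const auto& row : global_matrix)
+//     for (int v : row) expected = std::max(expected, v);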
+TEST(sedova_o_max_of_vector_elements_mpi, Test_10_10) { + size_t rows = 10; + size_t cols = 10; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_100_100) { + size_t rows = 100; + size_t cols = 100; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + 
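+    // reference_max is seeded with global_matrix[0][0], which is safe here: the
+    // final maximum can never be smaller than an actual element of the matrix.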
taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_1000_1000) { + size_t rows = 1000; + size_t cols = 1000; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_10_100) { + size_t rows = 10; + size_t cols = 100; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + 
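+    // Same rows and counts as the parallel task; only the output buffer differs
+    // (reference_max instead of global_max), so the comparison is like for like.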
std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_100_10) { + size_t rows = 100; + size_t cols = 10; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_500_10) { + size_t rows = 10; + size_t cols = 500; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + 
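+  // Every rank constructs the parallel task, but only rank 0 has filled
+  // taskDataPar; this is why validation() on non-zero ranks returns true
+  // unconditionally.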
sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_50_2) { + size_t rows = 2; + size_t cols = 50; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi, Test_10_2) { + size_t rows = 2; + size_t cols = 10; + size_t value = 30; + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -((int)(value))); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + for (unsigned int i = 0; 
i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, global_matrix[0][0]); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + // Create Task + sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_max[0], global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..c68866ccca6 --- /dev/null +++ b/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sedova_o_max_of_vector_elements_mpi { +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res_{}; + std::vector> input_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + boost::mpi::communicator world; + std::vector input_, loc_input_; + int res_{}; +}; +} // namespace sedova_o_max_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..7819c68ea72 --- /dev/null +++ b/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,134 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp" + +namespace sedova_o_max_of_vector_elements_mpi_test { + +std::vector generate_random_vector(size_t size, size_t value) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = random() % (value + 1); + } + return vec; +} +std::vector> generate_random_matrix(size_t rows, size_t cols, 
size_t value) { + std::vector> matrix(rows); + for (size_t i = 0; i < rows; i++) { + matrix[i] = generate_random_vector(cols, value); + } + return matrix; +} +} // namespace sedova_o_max_of_vector_elements_mpi_test + +TEST(sedova_o_max_of_vector_elements_mpi1, test_pipeline_run) { + size_t rows = 7000; + size_t cols = 7000; + int value = 7000; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -(int)value); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 random(dev()); + + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + rows = random() % rows; + cols = random() % cols; + global_matrix[rows][cols] = value; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(value, global_max[0]); + } +} + +TEST(sedova_o_max_of_vector_elements_mpi1, test_task_run) { + size_t rows = 7000; + size_t cols = 7000; + int value = 7000; + + boost::mpi::communicator world; + std::vector> global_matrix; + std::vector global_max(1, -(int)value); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + std::random_device dev; + std::mt19937 random(dev()); + + global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); + rows = random() % rows; + cols = random() % cols; + global_matrix[rows][cols] = value; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + 
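+  // task_run presumably times the run() stage alone, while pipeline_run would
+  // cover the whole validation/pre/run/post sequence; either way,
+  // perfAttr->num_running = 10 repetitions feed print_perf_statistic on rank 0.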
perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(value, global_max[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..4fd9f4d5f30 --- /dev/null +++ b/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,110 @@ +// Copyright 2024 Sedova Olga +#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp" + +#include + +#include + +int find_max_of_matrix(std::vector &matrix) { + if (matrix.empty()) { + return std::numeric_limits::min(); + } + auto max_it = std::max_element(matrix.begin(), matrix.end()); + return *max_it; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + unsigned int rows = taskData->inputs_count[0]; + unsigned int cols = taskData->inputs_count[1]; + input_ = std::vector>(rows, std::vector(cols)); + for (unsigned int i = 0; i < rows; i++) { + auto *tmp_ptr = reinterpret_cast(taskData->inputs[i]); + std::copy(tmp_ptr, tmp_ptr + cols, input_[i].begin()); + } + res_ = INT_MIN; + return true; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 1 && taskData->inputs_count[1] >= 1 && taskData->outputs_count[0] == 1; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + std::vector local_(input_.size()); + for (unsigned int i = 0; i < input_.size(); i++) { + local_[i] = *std::max_element(input_[i].begin(), input_[i].end()); + } + res_ = *std::max_element(local_.begin(), local_.end()); + return true; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + unsigned int rows = taskData->inputs_count[0]; + unsigned int cols = taskData->inputs_count[1]; + input_ = std::vector(rows * cols); + for (unsigned int i = 0; i < rows; i++) { + auto *input_data = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < cols; j++) { + input_[i * cols + j] = input_data[j]; + } + } + } + return true; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + return (world.rank() != 0) || ((taskData->outputs_count[0] == 1) && (!taskData->inputs.empty())); +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + unsigned int a = 0; + if (world.rank() == 0) { + a = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size(); + } + broadcast(world, a, 0); + if (world.rank() == 0) { + unsigned int rows = taskData->inputs_count[0]; + unsigned int cols = taskData->inputs_count[1]; + input_ = std::vector(rows * cols); + for (unsigned int i = 0; i < rows; i++) { + auto *tmp_ = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < cols; j++) { + input_[i * cols + j] = tmp_[j]; + } + } + for (int i = 1; i < world.size(); i++) { + world.send(i, 0, input_.data() + a * i, a); + } + } + loc_input_ = std::vector(a); + if (world.rank() == 0) { + loc_input_ = std::vector(input_.begin(), input_.begin() + a); + } else { + world.recv(0, 0, 
loc_input_.data(), a); + } + int loc_res = *std::max_element(loc_input_.begin(), loc_input_.end()); + reduce(world, loc_res, res_, boost::mpi::maximum(), 0); + return true; +} + +bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..44f6fb536b8 --- /dev/null +++ b/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,109 @@ +// Copyright 2024 Sedova Olga +#include + +#include +#include + +#include "core/task/include/task.hpp" +#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp" + +namespace sedova_o_max_of_vector_elements_seq_test { + +std::vector generate_random_vector(size_t size, size_t value) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = random() % (value + 1); + } + return vec; +} + +std::vector> generate_random_matrix(size_t rows, size_t cols, size_t value) { + std::vector> matrix(rows); + for (size_t i = 0; i < rows; i++) { + matrix[i] = generate_random_vector(cols, value); + } + return matrix; +} +} // namespace sedova_o_max_of_vector_elements_seq_test + +TEST(sedova_o_max_of_vector_elements_seq1, Test_Sum_Empty1) { + // Create data + std::vector in; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(sedova_o_max_of_vector_elements_seq1, Test_Sum_Input_Incorrect) { + int count = 10; + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(0); // Неверный размер входного вектора + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(sedova_o_max_of_vector_elements_seq1, Test_Matrix_2x2) { + // Create data + std::vector in = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(2, 10); + std::vector in2 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(2, 10); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in2.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + 
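+  // Each 2-element row goes in as its own input pointer with its own count, so
+  // inputs_count reads {2, 2}; validation() treats these as rows >= 1 and
+  // cols >= 1 with a single-int output slot.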
ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + std::vector matrix_input = {in[0], in[1], in2[0], in2[1]}; + ASSERT_EQ(sedova_o_max_of_vector_elements_seq::find_max_of_matrix(matrix_input), out[0]); +} + +TEST(sedova_o_max_of_vector_elements_seq1, Test_Matrix_3x3) { + // Create data + std::vector in = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10); + std::vector in2 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10); + std::vector in3 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10); + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in2.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in3.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in2.size()); + taskDataSeq->inputs_count.emplace_back(in3.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + std::vector matrix_input = {in[0], in[1], in[2], in2[0], in2[1], in2[2], in3[0], in3[1], in3[2]}; + ASSERT_EQ(sedova_o_max_of_vector_elements_seq::find_max_of_matrix(matrix_input), out[0]); +} diff --git a/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..e94ff3d479e --- /dev/null +++ b/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace sedova_o_max_of_vector_elements_seq { +int find_max_of_matrix(std::vector matrix); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res_{}; + std::vector input_{}; +}; + +} // namespace sedova_o_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..5fece2a10e8 --- /dev/null +++ b/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,156 @@ +// Copyright 2024 Sedova Olga +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "core/task/include/task.hpp" +#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp" + +namespace sedova_o_max_of_vector_elements_seq_test { + +std::vector generate_random_vector(size_t size, size_t value) { + std::random_device dev; + std::mt19937 random(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = random() % (value + 1); + } + return vec; +} + +std::vector> generate_random_matrix(size_t rows, size_t cols, size_t value) { + std::vector> matrix(rows); + for (size_t i = 0; i < rows; i++) { + matrix[i] = 
generate_random_vector(cols, value); + } + return matrix; +} +} // namespace sedova_o_max_of_vector_elements_seq_test + +TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_small_matrix) { + std::random_device dev; + std::mt19937 random(dev()); + + std::shared_ptr taskDataSeq = std::make_shared(); + size_t size = 5000; + int value = 5000; + + std::vector> in; + in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); + std::vector out(1, in[0][0]); + + size_t rows = random() % size; + size_t cols = random() % size; + in[rows][cols] = value; + + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(value, out[0]); +} + +TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_large_matrix) { + std::random_device dev; + std::mt19937 random(dev()); + + std::shared_ptr taskDataSeq = std::make_shared(); + size_t size = 50000; + int value = 50000; + + std::vector> in; + in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); + std::vector out(1, in[0][0]); + + size_t rows = random() % size; + size_t cols = random() % size; + in[rows][cols] = value; + + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(value, out[0]); +} + +TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_different_values) { + std::random_device dev; + std::mt19937 random(dev()); + + std::shared_ptr taskDataSeq = std::make_shared(); + size_t size = 15000; + int value = 15000; + + std::vector> in; + in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); + std::vector out(1, in[0][0]); + + size_t rows = random() % size; + size_t cols = random() % size; + in[rows][cols] = value + 1; + + for (unsigned 
int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->inputs_count.emplace_back(size); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(value + 1, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..c3417af265b --- /dev/null +++ b/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,44 @@ +// Copyright 2024 Sedova Olga +#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp" + +int sedova_o_max_of_vector_elements_seq::find_max_of_matrix(std::vector matrix) { + if (matrix.empty()) return 1; + int max = matrix[0]; + for (size_t i = 0; i < matrix.size(); i++) { + if (matrix[i] > max) { + max = matrix[i]; + } + } + return max; +} + +bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + unsigned int rows = taskData->inputs_count[0]; + unsigned int cols = taskData->inputs_count[1]; + input_ = std::vector(rows * cols); + for (unsigned int i = 0; i < rows; i++) { + auto* input_data = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i * cols + j] = input_data[j]; + } + } + return true; +} + +bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 1 && taskData->inputs_count[1] >= 1 && taskData->outputs_count[0] == 1; +} + +bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::run() { + internal_order_test(); + res_ = sedova_o_max_of_vector_elements_seq::find_max_of_matrix(input_); + return true; +} + +bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} \ No newline at end of file From 8b62b9a4a7c51fc41d9804362fe60f4cf29d7c3a Mon Sep 17 00:00:00 2001 From: KAPUSHA228 <125039835+KAPUSHA228@users.noreply.github.com> Date: Thu, 7 Nov 2024 02:52:07 +0300 Subject: [PATCH 135/155] =?UTF-8?q?=D0=A6=D0=B0=D1=86=D1=8B=D0=BD=20=D0=90?= =?UTF-8?q?=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=209.=20=20=D0=A1=D0=BA=D0=B0=D0=BB=D1=8F?= =?UTF-8?q?=D1=80=D0=BD=D0=BE=D0=B5=20=D0=BF=D1=80=D0=BE=D0=B8=D0=B7=D0=B2?= =?UTF-8?q?=D0=B5=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=B2=D0=B5=D0=BA=D1=82?= =?UTF-8?q?=D0=BE=D1=80=D0=BE=D0=B2.=20(#133)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 142 ++++++++++++++ 
.../include/ops_mpi.hpp | 51 +++++ .../perf_tests/main.cpp | 101 ++++++++++ .../src/ops_mpi.cpp | 133 +++++++++++++ .../func_tests/main.cpp | 181 ++++++++++++++++++ .../include/ops_seq.hpp | 27 +++ .../perf_tests/main.cpp | 102 ++++++++++ .../src/ops_seq.cpp | 48 +++++ 8 files changed, 785 insertions(+) create mode 100644 tasks/mpi/tsatsyn_a_vector_dot_product/func_tests/main.cpp create mode 100644 tasks/mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp create mode 100644 tasks/mpi/tsatsyn_a_vector_dot_product/perf_tests/main.cpp create mode 100644 tasks/mpi/tsatsyn_a_vector_dot_product/src/ops_mpi.cpp create mode 100644 tasks/seq/tsatsyn_a_vector_dot_product/func_tests/main.cpp create mode 100644 tasks/seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp create mode 100644 tasks/seq/tsatsyn_a_vector_dot_product/perf_tests/main.cpp create mode 100644 tasks/seq/tsatsyn_a_vector_dot_product/src/ops_seq.cpp diff --git a/tasks/mpi/tsatsyn_a_vector_dot_product/func_tests/main.cpp b/tasks/mpi/tsatsyn_a_vector_dot_product/func_tests/main.cpp new file mode 100644 index 00000000000..65ea1478d31 --- /dev/null +++ b/tasks/mpi/tsatsyn_a_vector_dot_product/func_tests/main.cpp @@ -0,0 +1,142 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include +#include +#include + +#include "mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp" +std::vector toGetRandomVector(int size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(size); + for (int i = 0; i < size; i++) { + v[i] = gen() % 200 + gen() % 10; + } + return v; +} + +TEST(tsatsyn_a_vector_dot_product_mpi, Test_Random_Scalar) { + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(60); + std::vector v2 = toGetRandomVector(60); + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataPar->inputs_count.emplace_back(v2.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), res[0]); + } +} +TEST(tsatsyn_a_vector_dot_product_mpi, 10xTest_Random_Scalar) { + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(120); + std::vector v2 = toGetRandomVector(120); + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataPar->inputs_count.emplace_back(v2.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + 
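+    // Rank 0 cross-checks: the same v1/v2 are replayed through
+    // TestMPITaskSequential, and the result is compared both against the
+    // parallel res and against the direct resulting(v1, v2) computation.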
std::vector res2(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res2.data())); + taskDataSeq->outputs_count.emplace_back(res2.size()); + tsatsyn_a_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(res2[0], res[0]); + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), res[0]); + } +} +TEST(tsatsyn_a_vector_dot_product_mpi, 100xTest_Random_Scalar) { + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(1200); + std::vector v2 = toGetRandomVector(1200); + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataPar->inputs_count.emplace_back(v2.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), res[0]); + } +} +TEST(tsatsyn_a_vector_dot_product_mpi, 1000xTest_Random_Scalar) { + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(12000); + std::vector v2 = toGetRandomVector(12000); + std::vector res(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataPar->inputs_count.emplace_back(v2.size()); + + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp b/tasks/mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp new file mode 100644 index 00000000000..9f8797a7e7b --- /dev/null +++ b/tasks/mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp @@ -0,0 +1,51 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace tsatsyn_a_vector_dot_product_mpi { +int resulting(const std::vector& v1, const std::vector& v2); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit 
TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector v1; + std::vector v2; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector v1; + std::vector v2; + std::vector local_v1; + std::vector local_v2; + int res{}; + boost::mpi::communicator world; + unsigned int delta; +}; + +} // namespace tsatsyn_a_vector_dot_product_mpi \ No newline at end of file diff --git a/tasks/mpi/tsatsyn_a_vector_dot_product/perf_tests/main.cpp b/tasks/mpi/tsatsyn_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..6fe5db59940 --- /dev/null +++ b/tasks/mpi/tsatsyn_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,101 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp" +std::vector toGetRandomVector(int size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(size); + for (int i = 0; i < size; i++) { + v[i] = gen() % 200 + gen() % 10; + } + return v; +} +TEST(mpi_tsatsyn_a_vector_dot_product_perf_test, test_pipeline_run) { + int size = 12000000; + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(size); + std::vector v2 = toGetRandomVector(size); + std::vector ans(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataPar->inputs_count.emplace_back(v2.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataPar->outputs_count.emplace_back(ans.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), ans[0]); + } +} + +TEST(mpi_tsatsyn_a_vector_dot_product_perf_test, test_task_run) { + int size = 12000000; + boost::mpi::communicator world; + std::vector v1 = toGetRandomVector(size); + std::vector v2 = toGetRandomVector(size); + std::vector ans(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataPar->inputs_count.emplace_back(v1.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(v2.data())); + 
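// The TestMPITaskParallel declared in ops_mpi.hpp above splits both vectors across ranks and
// sums per-rank partial dot products. A compact sketch of the same idea with Boost.MPI
// collectives (an illustrative helper under the assumption that both vectors are available on
// every rank; the shipped task instead sends explicit segments from rank 0):
//
//   int parallelDot(const boost::mpi::communicator& world, const std::vector<int>& a,
//                   const std::vector<int>& b) {
//     int n = static_cast<int>(a.size());
//     int chunk = n / world.size();
//     int begin = world.rank() * chunk;
//     int end = (world.rank() + 1 == world.size()) ? n : begin + chunk;  // last rank takes the tail
//     int local = 0;
//     for (int i = begin; i < end; ++i) local += a[i] * b[i];
//     int total = 0;
//     boost::mpi::reduce(world, local, total, std::plus<int>(), 0);  // result valid on rank 0
//     return total;
//   }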
taskDataPar->inputs_count.emplace_back(v2.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataPar->outputs_count.emplace_back(ans.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(tsatsyn_a_vector_dot_product_mpi::resulting(v1, v2), ans[0]); + } +} diff --git a/tasks/mpi/tsatsyn_a_vector_dot_product/src/ops_mpi.cpp b/tasks/mpi/tsatsyn_a_vector_dot_product/src/ops_mpi.cpp new file mode 100644 index 00000000000..a48ca7c86b4 --- /dev/null +++ b/tasks/mpi/tsatsyn_a_vector_dot_product/src/ops_mpi.cpp @@ -0,0 +1,133 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/tsatsyn_a_vector_dot_product/include/ops_mpi.hpp" + +#include +#include +#include +#include + +using namespace std::chrono_literals; + +int tsatsyn_a_vector_dot_product_mpi::resulting(const std::vector& v1, const std::vector& v2) { + int64_t res = 0; + for (size_t i = 0; i < v1.size(); ++i) { + res += v1[i] * v2[i]; + } + return res; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + v1.resize(taskData->inputs_count[0]); + v2.resize(taskData->inputs_count[1]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + std::copy(tempPtr, tempPtr + taskData->inputs_count[0], v1.begin()); + tempPtr = reinterpret_cast(taskData->inputs[1]); + std::copy(tempPtr, tempPtr + taskData->inputs_count[0], v2.begin()); + res = 0; + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && + taskData->outputs.size() == 1; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < v1.size(); i++) res += v1[i] * v2[i]; + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + (int)(taskData->inputs_count[0]) < world.size() ? 
delta = taskData->inputs_count[0] + : delta = taskData->inputs_count[0] / world.size(); + for (size_t i = 0; i < taskData->inputs.size(); ++i) { + if (taskData->inputs[i] == nullptr || taskData->inputs_count[i] == 0) { + return false; + } + } + v1.resize(taskData->inputs_count[0]); + int* source_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(source_ptr, source_ptr + taskData->inputs_count[0], v1.begin()); + + v2.resize(taskData->inputs_count[1]); + source_ptr = reinterpret_cast(taskData->inputs[1]); + std::copy(source_ptr, source_ptr + taskData->inputs_count[1], v2.begin()); + } + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if (taskData->inputs.empty() || taskData->outputs.empty() || + taskData->inputs_count[0] != taskData->inputs_count[1] || taskData->outputs_count[0] == 0) { + return false; + } + } + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel::run() { + internal_order_test(); + broadcast(world, delta, 0); + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); ++proc) { + world.send(proc, 0, v1.data() + proc * delta, delta); + world.send(proc, 1, v2.data() + proc * delta, delta); + } + } + local_v1.resize(delta); + local_v2.resize(delta); + if (world.rank() == 0) { + std::copy(v1.begin(), v1.begin() + delta, local_v1.begin()); + std::copy(v2.begin(), v2.begin() + delta, local_v2.begin()); + } else { + world.recv(0, 0, local_v1.data(), delta); + world.recv(0, 1, local_v2.data(), delta); + } + int local_result = 0; + for (size_t i = 0; i < local_v1.size(); ++i) { + local_result += local_v1[i] * local_v2[i]; + } + std::vector full_results; + gather(world, local_result, full_results, 0); + res = 0; + if (world.rank() == 0) { + for (int result : full_results) { + res += result; + } + } + if (world.rank() == 0 && (int)(taskData->inputs_count[0]) < world.size()) { + res = 0; + for (size_t i = 0; i < v1.size(); ++i) { + res += v1[i] * v2[i]; + } + } + return true; +} + +bool tsatsyn_a_vector_dot_product_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + if (!taskData->outputs.empty()) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } else { + return false; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/seq/tsatsyn_a_vector_dot_product/func_tests/main.cpp b/tasks/seq/tsatsyn_a_vector_dot_product/func_tests/main.cpp new file mode 100644 index 00000000000..f051a03f787 --- /dev/null +++ b/tasks/seq/tsatsyn_a_vector_dot_product/func_tests/main.cpp @@ -0,0 +1,181 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp" + +std::vector toGetRandomVector(int size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(size); + for (int i = 0; i < size; i++) { + v[i] = gen() % 200 + gen() % 10; + } + return v; +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Random_Scalar) { + const int size = 5; + + // Create data + std::vector v1 = toGetRandomVector(size); + std::vector v2 = toGetRandomVector(size); + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + 
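// A note on the block split in TestMPITaskParallel above: with delta = n / world.size(), the
// ranks together cover only delta * world.size() elements, so a tail of n % world.size()
// elements is skipped whenever n is not a multiple of the process count (the serial fallback
// in run() only handles n < world.size()). A remainder-aware split would look like this
// (a sketch of the usual fix, not what this patch ships; needs <algorithm> for std::min):
//
//   int n = static_cast<int>(v1.size());
//   int p = world.size();
//   int base = n / p;
//   int rem = n % p;
//   auto count_for = [&](int r) { return base + (r < rem ? 1 : 0); };      // per-rank length
//   auto offset_for = [&](int r) { return r * base + std::min(r, rem); };  // per-rank start
//
// The sizes used in these tests (60, 120, 1200, 12000) only avoid the issue when the process
// count divides them evenly.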
taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Negative_Validation) { + // Create data + std::vector v1 = {1, 2, 3}; + std::vector v2 = {1, 2, 3, 4}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Scalar_32) { + // Create data + std::vector v1 = {1, 2, 3}; + std::vector v2 = {4, 5, 6}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Scalar_28) { + // Create data + std::vector v1 = {6, 4, 5}; + std::vector v2 = {1, 3, 2}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Scalar_95) { + // Create data + std::vector v1 = {7, 4, 6}; + std::vector v2 = {3, 5, 9}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + 
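// The numeric suffix of each test name is the expected dot product. For this test:
// {7, 4, 6} . {3, 5, 9} = 7*3 + 4*5 + 6*9 = 21 + 20 + 54 = 95,
// and resulting(v1, v2) recomputes the same value independently below.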
tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Scalar_2330) { + // Create data + std::vector v1 = {20, 54, 23}; + std::vector v2 = {32, 10, 50}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} + +TEST(tsatsyn_a_vector_dot_product_seq, Test_Scalar_1956) { + // Create data + std::vector v1 = {12, 100, 50}; + std::vector v2 = {13, 3, 30}; + std::vector ans(1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + tsatsyn_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} diff --git a/tasks/seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp b/tasks/seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp new file mode 100644 index 00000000000..5f065f54f1f --- /dev/null +++ b/tasks/seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp @@ -0,0 +1,27 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace tsatsyn_a_vector_dot_product_seq { +int resulting(const std::vector& v1, const std::vector& v2); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector v1; + std::vector v2; + int res{}; +}; + +} // namespace tsatsyn_a_vector_dot_product_seq \ No newline at end of file diff --git a/tasks/seq/tsatsyn_a_vector_dot_product/perf_tests/main.cpp b/tasks/seq/tsatsyn_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..555e08111dc --- /dev/null +++ b/tasks/seq/tsatsyn_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,102 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" 
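// Scale note for the perf tests below: with size = 10000000 and elements up to 208, the dot
// product can reach about 10^11, which does not fit in the 32-bit int accumulator used by
// resulting() and TestTaskSequential::run(). The ASSERT_EQ still holds in practice because
// both sides wrap identically, but signed overflow is undefined behaviour. A 64-bit sketch of
// the accumulator (a suggested variant, not what this patch ships):
//
//   int64_t dot64(const std::vector<int>& a, const std::vector<int>& b) {
//     int64_t acc = 0;
//     for (size_t i = 0; i < a.size(); ++i) acc += static_cast<int64_t>(a[i]) * b[i];
//     return acc;
//   }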
+#include "seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp" + +std::vector toGetRandomVector(int size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector v(size); + for (int i = 0; i < size; i++) { + v[i] = gen() % 200 + gen() % 10; + } + return v; +} + +TEST(sequential_tsatsyn_a_vector_dot_product_perf_test, test_pipeline_run) { + const int size = 10000000; + + // Create data + std::vector v1 = toGetRandomVector(size); + std::vector v2 = toGetRandomVector(size); + std::vector ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + // ASSERT_EQ(tsatsyn_a_vector_dot_product::resulting(v1, v2), ans[0]); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} + +TEST(sequential_tsatsyn_a_vector_dot_product_perf_test, test_task_run) { + const int size = 10000000; + + // Create data + std::vector v1 = toGetRandomVector(size); + std::vector v2 = toGetRandomVector(size); + std::vector ans(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + taskDataSeq->inputs_count.emplace_back(v2.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ans.data())); + taskDataSeq->outputs_count.emplace_back(ans.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(tsatsyn_a_vector_dot_product_seq::resulting(v1, v2), ans[0]); +} \ No newline at end of file diff --git a/tasks/seq/tsatsyn_a_vector_dot_product/src/ops_seq.cpp b/tasks/seq/tsatsyn_a_vector_dot_product/src/ops_seq.cpp new file mode 100644 index 00000000000..5beb2d239a2 --- /dev/null +++ 
b/tasks/seq/tsatsyn_a_vector_dot_product/src/ops_seq.cpp @@ -0,0 +1,48 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/tsatsyn_a_vector_dot_product/include/ops_seq.hpp" + +#include +#include +using namespace std::chrono_literals; + +int tsatsyn_a_vector_dot_product_seq::resulting(const std::vector& v1, const std::vector& v2) { + int res = 0; + for (size_t i = 0; i < v1.size(); ++i) { + res += v1[i] * v2[i]; + } + return res; +} + +bool tsatsyn_a_vector_dot_product_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + v1.resize(taskData->inputs_count[0]); + v2.resize(taskData->inputs_count[1]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + std::copy(tempPtr, tempPtr + taskData->inputs_count[0], v1.begin()); + tempPtr = reinterpret_cast(taskData->inputs[1]); + std::copy(tempPtr, tempPtr + taskData->inputs_count[1], v2.begin()); + res = 0; + return true; +} + +bool tsatsyn_a_vector_dot_product_seq::TestTaskSequential::validation() { + internal_order_test(); + return (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + taskData->outputs_count[0] == 1 && (taskData->outputs.size() == taskData->outputs_count.size()) && + taskData->outputs.size() == 1; +} + +bool tsatsyn_a_vector_dot_product_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < v1.size(); i++) { + res += v1[i] * v2[i]; + } + return true; +} + +bool tsatsyn_a_vector_dot_product_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From b7994c1c7a8184171caf83baf2baf37bcfd611cf Mon Sep 17 00:00:00 2001 From: keshaproger <125856206+keshaproger@users.noreply.github.com> Date: Thu, 7 Nov 2024 02:53:55 +0300 Subject: [PATCH 136/155] =?UTF-8?q?=D0=9A=D0=BE=D0=BD=D1=8C=D0=BA=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=98=D0=B2=D0=B0=D0=BD.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=2024.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87=D0=B5=D1=82=20=D1=81?= =?UTF-8?q?=D0=BB=D0=BE=D0=B2=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA?= =?UTF-8?q?=D0=B5.=20(#187)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../konkov_i_count_words/func_tests/main.cpp | 203 ++++++++++++++++++ .../konkov_i_count_words/include/ops_mpi.hpp | 27 +++ .../konkov_i_count_words/perf_tests/main.cpp | 100 +++++++++ .../mpi/konkov_i_count_words/src/ops_mpi.cpp | 79 +++++++ .../konkov_i_count_words/func_tests/main.cpp | 159 ++++++++++++++ .../konkov_i_count_words/include/ops_seq.hpp | 24 +++ .../konkov_i_count_words/perf_tests/main.cpp | 87 ++++++++ .../seq/konkov_i_count_words/src/ops_seq.cpp | 33 +++ 8 files changed, 712 insertions(+) create mode 100644 tasks/mpi/konkov_i_count_words/func_tests/main.cpp create mode 100644 tasks/mpi/konkov_i_count_words/include/ops_mpi.hpp create mode 100644 tasks/mpi/konkov_i_count_words/perf_tests/main.cpp create mode 100644 tasks/mpi/konkov_i_count_words/src/ops_mpi.cpp create mode 100644 tasks/seq/konkov_i_count_words/func_tests/main.cpp create mode 100644 tasks/seq/konkov_i_count_words/include/ops_seq.hpp create mode 100644 tasks/seq/konkov_i_count_words/perf_tests/main.cpp create mode 100644 tasks/seq/konkov_i_count_words/src/ops_seq.cpp diff --git a/tasks/mpi/konkov_i_count_words/func_tests/main.cpp b/tasks/mpi/konkov_i_count_words/func_tests/main.cpp new file mode 100644 index 
00000000000..7cefa95f539 --- /dev/null +++ b/tasks/mpi/konkov_i_count_words/func_tests/main.cpp @@ -0,0 +1,203 @@ +// Copyright 2023 Konkov Ivan +#include + +#include +#include +#include +#include + +#include "mpi/konkov_i_count_words/include/ops_mpi.hpp" + +std::string generate_random_string(int length) { + static const char alphanum[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + std::string result; + result.reserve(length); + for (int i = 0; i < length; ++i) { + result += alphanum[rand() % (sizeof(alphanum) - 1)]; + } + return result; +} + +TEST(konkov_i_count_words_mpi, Test_Empty_String) { + boost::mpi::communicator world; + std::string input; + int expected_count = 0; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Single_Word) { + boost::mpi::communicator world; + std::string input = "Hello"; + int expected_count = 1; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Multiple_Words) { + boost::mpi::communicator world; + std::string input = "Hello world this is a test"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Random_String) { + boost::mpi::communicator world; + std::string input = generate_random_string(100); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) { + expected_count++; + } + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + 
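// Word counting in these tests relies on operator>> for std::string, which skips any run of
// whitespace (spaces, tabs, newlines) before each token. That is why Test_Multiple_Spaces and
// Test_Newlines below both expect 6 words. A tiny illustration of the semantics (assumed
// snippet, not part of the patch):
//
//   std::istringstream s("a  b\n\nc");
//   std::string w;
//   int n = 0;
//   while (s >> w) ++n;  // n == 3
//
// Punctuation stays attached to its token ("Hello," is one word), so Test_Punctuation also
// counts 6.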
taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Multiple_Spaces) { + boost::mpi::communicator world; + std::string input = "Hello world this is a test"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Newlines) { + boost::mpi::communicator world; + std::string input = "Hello\nworld\nthis\nis\na\ntest"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, Test_Punctuation) { + boost::mpi::communicator world; + std::string input = "Hello, world! 
This is a test."; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + konkov_i_count_words_mpi::CountWordsTaskParallel testTaskParallel(taskDataPar); + ASSERT_EQ(testTaskParallel.validation(), true); + testTaskParallel.pre_processing(); + testTaskParallel.run(); + testTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(expected_count, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/konkov_i_count_words/include/ops_mpi.hpp b/tasks/mpi/konkov_i_count_words/include/ops_mpi.hpp new file mode 100644 index 00000000000..da968921a82 --- /dev/null +++ b/tasks/mpi/konkov_i_count_words/include/ops_mpi.hpp @@ -0,0 +1,27 @@ +// ops_mpi.hpp +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace konkov_i_count_words_mpi { + +class CountWordsTaskParallel : public ppc::core::Task { + public: + explicit CountWordsTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + int word_count_{}; + boost::mpi::communicator world; +}; + +} // namespace konkov_i_count_words_mpi diff --git a/tasks/mpi/konkov_i_count_words/perf_tests/main.cpp b/tasks/mpi/konkov_i_count_words/perf_tests/main.cpp new file mode 100644 index 00000000000..ace869bd668 --- /dev/null +++ b/tasks/mpi/konkov_i_count_words/perf_tests/main.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 Konkov Ivan +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/konkov_i_count_words/include/ops_mpi.hpp" + +std::string generate_large_string(int size) { + std::string base = "Hello world this is a test "; + std::string result; + while (result.size() < static_cast(size)) { + result += base; + } + return result; +} + +TEST(konkov_i_count_words_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string input = generate_large_string(1000000); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) { + expected_count++; + } + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); + } +} + +TEST(konkov_i_count_words_mpi, test_task_run) { + 
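// test_task_run repeats the setup of test_pipeline_run above; the only difference is calling
// perfAnalyzer->task_run() instead of pipeline_run(). Presumably (an assumption about the
// ppc::core::Perf harness, not verified here) pipeline_run times the whole
// pre_processing/run/post_processing sequence while task_run focuses on run(); both report
// through print_perf_statistic().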
boost::mpi::communicator world; + std::string input = generate_large_string(1000000); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) { + expected_count++; + } + + std::vector out(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&input)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/konkov_i_count_words/src/ops_mpi.cpp b/tasks/mpi/konkov_i_count_words/src/ops_mpi.cpp new file mode 100644 index 00000000000..7e71624aa78 --- /dev/null +++ b/tasks/mpi/konkov_i_count_words/src/ops_mpi.cpp @@ -0,0 +1,79 @@ +// ops_mpi.cpp +#include "mpi/konkov_i_count_words/include/ops_mpi.hpp" + +#include +#include + +bool konkov_i_count_words_mpi::CountWordsTaskParallel::pre_processing() { + internal_order_test(); + word_count_ = 0; + return true; +} + +bool konkov_i_count_words_mpi::CountWordsTaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1 && taskData->inputs[0] != nullptr && + taskData->outputs[0] != nullptr; + } + return true; +} + +bool konkov_i_count_words_mpi::CountWordsTaskParallel::run() { + internal_order_test(); + int num_processes = world.size(); + int rank = world.rank(); + + if (rank == 0) { + input_ = *reinterpret_cast(taskData->inputs[0]); + + std::vector words; + std::istringstream stream(input_); + std::string word; + while (stream >> word) { + words.push_back(word); + } + + int total_words = words.size(); + int chunk_size = total_words / num_processes; + + for (int i = 1; i < num_processes; ++i) { + int start_pos = i * chunk_size; + int end_pos = (i == num_processes - 1) ? 
total_words : (i + 1) * chunk_size; + std::vector chunk(words.begin() + start_pos, words.begin() + end_pos); + std::ostringstream oss; + for (const auto& w : chunk) { + oss << w << " "; + } + world.send(i, 0, oss.str()); + } + + words.assign(words.begin(), words.begin() + chunk_size); + std::ostringstream oss; + for (const auto& w : words) { + oss << w << " "; + } + input_ = oss.str(); + } else { + world.recv(0, 0, input_); + } + + int local_word_count = 0; + std::istringstream local_stream(input_); + std::string local_word; + while (local_stream >> local_word) { + local_word_count++; + } + + boost::mpi::reduce(world, local_word_count, word_count_, std::plus<>(), 0); + + return true; +} + +bool konkov_i_count_words_mpi::CountWordsTaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = word_count_; + } + return true; +} diff --git a/tasks/seq/konkov_i_count_words/func_tests/main.cpp b/tasks/seq/konkov_i_count_words/func_tests/main.cpp new file mode 100644 index 00000000000..e4a526c9f15 --- /dev/null +++ b/tasks/seq/konkov_i_count_words/func_tests/main.cpp @@ -0,0 +1,159 @@ +// Copyright 2023 Konkov Ivan +#include + +#include +#include + +#include "seq/konkov_i_count_words/include/ops_seq.hpp" + +std::string generate_random_string(int length) { + static const char alphanum[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + std::string result; + result.reserve(length); + for (int i = 0; i < length; ++i) { + result += alphanum[rand() % (sizeof(alphanum) - 1)]; + } + return result; +} + +TEST(konkov_i_count_words_seq, Test_Empty_String) { + std::string input; + int expected_count = 0; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Single_Word) { + std::string input = "Hello"; + int expected_count = 1; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Multiple_Words) { + std::string input = "Hello world this is a test"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + 
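// CountWordsTaskParallel::run() above materialises a std::vector of words and re-serialises
// each chunk into a string before sending it. A lighter alternative sketch that counts words
// in place by scanning space/non-space transitions (an assumed helper, not the shipped code;
// needs <cctype>):
//
//   int countWords(const std::string& s) {
//     int count = 0;
//     bool in_word = false;
//     for (char c : s) {
//       bool is_space = (std::isspace(static_cast<unsigned char>(c)) != 0);
//       if (!is_space && !in_word) ++count;  // a new word starts at this character
//       in_word = !is_space;
//     }
//     return count;
//   }
//
// With this, rank 0 could ship raw substrings split at whitespace boundaries instead of
// rebuilding per-chunk word lists, saving allocations and copies.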
ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Random_String) { + std::string input = generate_random_string(100); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) { + expected_count++; + } + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Multiple_Spaces) { + std::string input = "Hello world this is a test"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Newlines) { + std::string input = "Hello\nworld\nthis\nis\na\ntest"; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, Test_Punctuation) { + std::string input = "Hello, world! 
This is a test."; + int expected_count = 6; + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + konkov_i_count_words_seq::CountWordsTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expected_count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/konkov_i_count_words/include/ops_seq.hpp b/tasks/seq/konkov_i_count_words/include/ops_seq.hpp new file mode 100644 index 00000000000..7db58e32905 --- /dev/null +++ b/tasks/seq/konkov_i_count_words/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Konkov Ivan +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace konkov_i_count_words_seq { + +class CountWordsTaskSequential : public ppc::core::Task { + public: + explicit CountWordsTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_; + int word_count_{}; +}; + +} // namespace konkov_i_count_words_seq \ No newline at end of file diff --git a/tasks/seq/konkov_i_count_words/perf_tests/main.cpp b/tasks/seq/konkov_i_count_words/perf_tests/main.cpp new file mode 100644 index 00000000000..44755e40aaf --- /dev/null +++ b/tasks/seq/konkov_i_count_words/perf_tests/main.cpp @@ -0,0 +1,87 @@ +// Copyright 2023 Konkov Ivan +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/konkov_i_count_words/include/ops_seq.hpp" + +std::string generate_large_string(int size) { + std::string base = "Hello world this is a test "; + std::string result; + while (result.size() < static_cast(size)) { + result += base; + } + return result; +} + +TEST(konkov_i_count_words_seq, test_pipeline_run) { + std::string input = generate_large_string(100000); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) { + expected_count++; + } + + std::vector out(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); +} + +TEST(konkov_i_count_words_seq, test_task_run) { + std::string input = generate_large_string(100000); + + std::istringstream stream(input); + std::string word; + int expected_count = 0; + while (stream >> word) 
{ + expected_count++; + } + + std::vector<int> out(1, 0); + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&input)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared<konkov_i_count_words_seq::CountWordsTaskSequential>(taskDataSeq); + + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expected_count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/konkov_i_count_words/src/ops_seq.cpp b/tasks/seq/konkov_i_count_words/src/ops_seq.cpp new file mode 100644 index 00000000000..dd397bdd684 --- /dev/null +++ b/tasks/seq/konkov_i_count_words/src/ops_seq.cpp @@ -0,0 +1,33 @@ +// Copyright 2023 Konkov Ivan +#include "seq/konkov_i_count_words/include/ops_seq.hpp" + +#include <sstream> + +bool konkov_i_count_words_seq::CountWordsTaskSequential::pre_processing() { + internal_order_test(); + input_ = *reinterpret_cast<std::string*>(taskData->inputs[0]); + word_count_ = 0; + return true; +} + +bool konkov_i_count_words_seq::CountWordsTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1 && taskData->inputs[0] != nullptr && + taskData->outputs[0] != nullptr; +} + +bool konkov_i_count_words_seq::CountWordsTaskSequential::run() { + internal_order_test(); + std::istringstream stream(input_); + std::string word; + while (stream >> word) { + word_count_++; + } + return true; +} + +bool konkov_i_count_words_seq::CountWordsTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int*>(taskData->outputs[0])[0] = word_count_; + return true; +} \ No newline at end of file From f117ddf7308db293a67c2b021bcff533594f6ecf Mon Sep 17 00:00:00 2001 From: KorneevaEkaterina <112872480+KorneevaEkaterina@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:02:04 +0300 Subject: [PATCH 137/155] =?UTF-8?q?=D0=9A=D0=BE=D1=80=D0=BD=D0=B5=D0=B5?= =?UTF-8?q?=D0=B2=D0=B0=20=D0=95=D0=BA=D0=B0=D1=82=D0=B5=D1=80=D0=B8=D0=BD?= =?UTF-8?q?=D0=B0.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92?= =?UTF-8?q?=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=206.=20=D0=9D=D0=B0=D1=85?= =?UTF-8?q?=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=87=D0=B8=D1=81?= =?UTF-8?q?=D0=BB=D0=B0=20=D0=BD=D0=B0=D1=80=D1=83=D1=88=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B9=20=D1=83=D0=BF=D0=BE=D1=80=D1=8F=D0=B4=D0=BE=D1=87=D0=B5?= =?UTF-8?q?=D0=BD=D0=BD=D0=BE=D1=81=D1=82=D0=B8=20=D1=81=D0=BE=D1=81=D0=B5?= =?UTF-8?q?=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD?= =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0?= =?UTF-8?q?.=20(#175)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **seq:** The task reduces to counting the number of "ordering violations" in a numeric sequence. A violation is recorded whenever the previous element is greater than the element that follows it.
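The per-pair check is simple enough to state inline; a minimal sketch of the sequential kernel (names are illustrative, not taken from the patch):

    size_t count_violations(const std::vector<int>& v) {
      size_t violations = 0;
      for (size_t i = 1; i < v.size(); ++i) {
        if (v[i - 1] > v[i]) ++violations;  // previous element exceeds the next one
      }
      return violations;
    }

An empty or single-element vector yields 0, which the functional tests below rely on.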
Testing covers checks on fixed data sets and on random sequences of different sizes, as well as performance tests that measure the speed on large arrays. **mpi:** The parallel MPI version of the task performs the same count of "ordering violations", but the data is distributed among the processes, each of which handles its own part of the sequence. The partial results are then combined on the root process. Testing likewise covers correctness checks on fixed and random data sets, as well as performance measurement. --- .../func_tests/main.cpp | 422 ++++++++++++++++++ .../include/ops_mpi.hpp | 162 +++++++ .../perf_tests/main.cpp | 104 +++++ .../src/ops_mpi.cpp | 1 + .../func_tests/main.cpp | 112 +++++ .../include/ops_seq.hpp | 73 +++ .../perf_tests/main.cpp | 99 ++++ .../src/ops_seq.cpp | 1 + 8 files changed, 974 insertions(+) create mode 100644 tasks/mpi/korneeva_e_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp create mode 100644 tasks/mpi/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/mpi/korneeva_e_num_of_orderly_violations/src/ops_mpi.cpp create mode 100644 tasks/seq/korneeva_e_num_of_orderly_violations/func_tests/main.cpp create mode 100644 tasks/seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp create mode 100644 tasks/seq/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp create mode 100644 tasks/seq/korneeva_e_num_of_orderly_violations/src/ops_seq.cpp diff --git a/tasks/mpi/korneeva_e_num_of_orderly_violations/func_tests/main.cpp b/tasks/mpi/korneeva_e_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..3530e41a632 --- /dev/null +++ b/tasks/mpi/korneeva_e_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,422 @@ +#include <gtest/gtest.h> + +#include "mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp" + +// Test for a single-element vector, expecting no violations +TEST(korneeva_e_num_of_orderly_violations_mpi, NoViolations_SingleElement) { + const int N = 1; // Size of the vector + boost::mpi::communicator world; + int rank = world.rank(); + std::vector<int> arr(N, 42); // Initialize vector with a single value + std::vector<int> out(1); + std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>(); + + if (rank == 0) { + data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + + task.pre_processing(); + task.run(); + task.post_processing(); + + // Check the result only on the root process + if (rank == 0) { + int expected_count = 0; + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a monotonically increasing vector, expecting no violations +TEST(korneeva_e_num_of_orderly_violations_mpi, NoViolations_IncreasingOrder) { + const int N = 100; // Size of the vector + boost::mpi::communicator world; + std::vector<int> arr(N); + std::vector<int> out(1); + std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>(); + + if (world.rank() == 0) { + std::iota(arr.begin(), arr.end(), 0); // Fill the vector with increasing numbers (0, 1, 2, ..., N-1) + data_seq->inputs.emplace_back(reinterpret_cast<uint8_t*>(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); +
data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + // Check the result only on the root process + if (world.rank() == 0) { + int expected_count = 0; // No violations expected + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a monotonically decreasing vector, expecting maximum violations +TEST(korneeva_e_num_of_orderly_violations_mpi, FullViolations_DecreasingOrder) { + const int N = 100; // Size of the vector + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + // Only the root process initializes the input and output + if (world.rank() == 0) { + for (int i = 0; i < N; ++i) { + arr[i] = N - i; // Fill the vector with decreasing numbers (N, N-1, N-2, ..., 1) + } + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + // Check the result only on the root process + if (world.rank() == 0) { + int expected_count = N - 1; // Maximum violations expected + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector with all equal elements, expecting no violations +TEST(korneeva_e_num_of_orderly_violations_mpi, NoViolations_AllElementsEqual) { + const int N = 100; // Size of the vector + boost::mpi::communicator world; + std::vector arr(N, 5); // All elements equal to 5 + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = 0; // No violations expected + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for an empty vector with int data type, expecting no violations +TEST(korneeva_e_num_of_orderly_violations_mpi, NoViolations_EmptyVector_Int) { + const int N = 0; + boost::mpi::communicator world; + std::vector arr(N); // Empty vector + std::vector out(1, 0); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + // For an empty vector, there should be 0 violations + int expected_count = 0; + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 10 random integers +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Int_10) { 
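// For the MPI variant, cutting the array into disjoint blocks loses exactly the pairs that
// straddle block boundaries. The standard remedy (a sketch of the idea; whether
// num_of_orderly_violations does precisely this internally is not shown in this hunk) is to
// give every block after the first a one-element overlap with its left neighbour, count
// locally, then reduce:
//
//   // each rank owns chunk[0..len); chunk[0] duplicates the left neighbour's last element
//   int local = 0;
//   for (int i = 1; i < len; ++i) local += (chunk[i - 1] > chunk[i]) ? 1 : 0;
//   int total = 0;
//   boost::mpi::reduce(world, local, total, std::plus<int>(), 0);
//
// Every adjacent pair is then counted exactly once across all ranks.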
+ const int N = 10; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(-N, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 100 random integers +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Int_100) { + const int N = 100; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(-N, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 1000 random integers +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Int_1000) { + const int N = 1000; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(-N, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 10000 random integers +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Int_10000) { + const int N = 10000; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(-N, N); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + 
data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 10 random doubles +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Double_10) { + const int N = 10; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_real_distribution dist(-10.0, 10.0); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 100 random doubles +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Double_100) { + const int N = 100; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_real_distribution dist(-100.0, 100.0); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 1000 random doubles +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Double_1000) { + const int N = 1000; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_real_distribution dist(-1000.0, 1000.0); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = 
task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +// Test for a vector of 10000 random doubles +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_Double_10000) { + const int N = 10000; + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_real_distribution dist(-10000.0, 10000.0); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} + +TEST(korneeva_e_num_of_orderly_violations_mpi, CountViolations_SmallData_FewerThanProcesses) { + boost::mpi::communicator world; + std::vector arr = {3, 2, 1}; + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + + if (world.rank() == 0) { + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + + korneeva_e_num_of_orderly_violations_mpi::num_of_orderly_violations task(data_seq); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + + if (world.rank() == 0) { + int expected_count = task.count_orderly_violations(arr); + ASSERT_EQ(out[0], expected_count); + } +} \ No newline at end of file diff --git a/tasks/mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp b/tasks/mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp new file mode 100644 index 00000000000..7fb49bb410b --- /dev/null +++ b/tasks/mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp @@ -0,0 +1,162 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korneeva_e_num_of_orderly_violations_mpi { + +template +class num_of_orderly_violations : public ppc::core::Task { + public: + explicit num_of_orderly_violations(std::shared_ptr taskData_) : Task(taskData_) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + cntype count_orderly_violations(std::vector vec); + + private: + std::vector input_data_; // Local copy of data for processing + cntype violation_count_; // Variable to store count of violations + boost::mpi::communicator mpi_comm; // MPI communicator for parallel processing + + size_t input_size; + size_t local_vector_size_; // Size of the local data vector + std::vector received_data_; // Buffer for data received from other processes + + std::vector send_sizes; + int chunk_size_; + int remainder_; +}; + +template +bool num_of_orderly_violations::pre_processing() { + internal_order_test(); + + if (mpi_comm.rank() == 0) { + input_size = taskData->inputs_count[0]; + input_data_.resize(input_size); + const auto* source_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(source_ptr, source_ptr + 
input_size, input_data_.begin()); + } + return true; +} + +template +bool num_of_orderly_violations::validation() { + internal_order_test(); // Validate internal order + + // Process 0 checks the validity of input and output counts + if (mpi_comm.rank() == 0) { + bool valid_output = (taskData->outputs_count[0] == 1); + bool valid_inputs = (taskData->inputs_count.size() == 1) && (taskData->inputs_count[0] >= 0); + + return valid_output && valid_inputs; // Return true if both checks pass + } + return true; // Other processes do not validate +} + +template +bool num_of_orderly_violations::run() { + internal_order_test(); + int process_rank = mpi_comm.rank(); + int total_processes = mpi_comm.size(); + + boost::mpi::broadcast(mpi_comm, input_size, 0); + + if (input_size <= 1) { + violation_count_ = 0; + return true; + } + + if (process_rank == 0) { + chunk_size_ = input_size / total_processes; + remainder_ = input_size % total_processes; + } + + boost::mpi::broadcast(mpi_comm, chunk_size_, 0); + boost::mpi::broadcast(mpi_comm, remainder_, 0); + + send_sizes.resize(total_processes); + for (int i = 0; i < total_processes; ++i) { + send_sizes[i] = chunk_size_ + (i < remainder_ ? 1 : 0); + } + local_vector_size_ = send_sizes[process_rank]; + + received_data_.resize(local_vector_size_); + std::vector offsets(total_processes, 0); + for (int i = 1; i < total_processes; ++i) { + offsets[i] = offsets[i - 1] + send_sizes[i - 1]; + } + + boost::mpi::scatterv(mpi_comm, input_data_, send_sizes, offsets, received_data_.data(), local_vector_size_, 0); + + cntype local_violations = 0; + if (local_vector_size_ > 1) { + for (size_t i = 0; i < local_vector_size_ - 1; ++i) { + if (received_data_[i + 1] < received_data_[i]) { + local_violations++; + } + } + } + + if (local_vector_size_ > 0) { + iotype left_boundary; + iotype right_boundary; + bool is_last_active_process = (process_rank == total_processes - 1 || send_sizes[process_rank + 1] == 0); + + if (!is_last_active_process) { + mpi_comm.recv(process_rank + 1, 0, right_boundary); + if (received_data_[local_vector_size_ - 1] > right_boundary) { + local_violations++; + } + } + + if (process_rank > 0) { + left_boundary = received_data_[0]; + mpi_comm.send(process_rank - 1, 0, left_boundary); + } + } + + boost::mpi::reduce(mpi_comm, local_violations, violation_count_, std::plus(), 0); + return true; +} + +template +bool num_of_orderly_violations::post_processing() { + internal_order_test(); // Validate internal order + + // Process 0 writes the total violation count to output + if (mpi_comm.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = violation_count_; + } + return true; +} + +template +cntype num_of_orderly_violations::count_orderly_violations(std::vector vector_data) { + cntype violation_count = 0; // Initialize violation count + + // Return zero if the input vector is empty + if (vector_data.empty()) { + return violation_count; + } + + // Count violations in the provided vector + for (size_t index = 0; index < vector_data.size() - 1; ++index) { + if (vector_data[index + 1] < vector_data[index]) { + violation_count++; + } + } + return violation_count; // Return the total violation count +} + +} // namespace korneeva_e_num_of_orderly_violations_mpi \ No newline at end of file diff --git a/tasks/mpi/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp b/tasks/mpi/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..d8ff0db6e67 --- /dev/null +++ 
b/tasks/mpi/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp" + +// Test to measure and validate pipeline execution +TEST(korneeva_e_num_of_orderly_violations_mpi, test_pipeline_execution) { + const int vector_size = 10000000; // Define vector size + std::vector data_vector(vector_size); // Data vector + std::vector result_buffer(1, 0); // Result buffer + boost::mpi::communicator comm_world; // Create MPI communicator + + // Create task data + auto task_data = std::make_shared(); + if (comm_world.rank() == 0) { + std::random_device random_device; + std::default_random_engine engine(random_device()); + std::uniform_int_distribution distribution(0, 100); // Generate random values from 0 to 100 + std::generate(data_vector.begin(), data_vector.end(), [&]() { return distribution(engine); }); + + // Fill input and output data + task_data->inputs.emplace_back(reinterpret_cast(data_vector.data())); + task_data->inputs_count.emplace_back(data_vector.size()); + task_data->outputs.emplace_back(reinterpret_cast(result_buffer.data())); + task_data->outputs_count.emplace_back(result_buffer.size()); + } + + // Create MPI task + auto mpi_task = + std::make_shared>(task_data); + + // Setup performance attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Number of runs for performance measurement + const boost::mpi::timer timer_instance; // Timer instance + perfAttr->current_timer = [&] { return timer_instance.elapsed(); }; // Lambda for elapsed time + + // Initialize performance results + auto perfResults = std::make_shared(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared(mpi_task); + perfAnalyzer->pipeline_run(perfAttr, perfResults); // Run the pipeline + + if (comm_world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); // Print performance statistics + auto computed_result = mpi_task->count_orderly_violations(data_vector); // Sequential processing + ASSERT_EQ(computed_result, result_buffer[0]); // Validate result + } +} + +// Test to measure and validate task execution +TEST(korneeva_e_num_of_orderly_violations_mpi, test_task_run) { + const int numElements = 10000000; // Number of elements in the vector + boost::mpi::communicator world; + std::vector global_vec(numElements); // Global vector + std::vector out(1, 0); // Output buffer + + // Create TaskData + std::shared_ptr taskData = std::make_shared(); + + if (world.rank() == 0) { + std::random_device randomDevice; + std::default_random_engine reng(randomDevice()); + std::uniform_int_distribution dist(0, 100); // Generate values between 0 and 100 + std::generate(global_vec.begin(), global_vec.end(), [&dist, &reng] { return dist(reng); }); + taskData->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskData->inputs_count.emplace_back(global_vec.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + } + + // Create MPI task + auto testMpiTaskParallel = + std::make_shared>(taskData); + + // Create a buffer for receiving data + std::vector recv_buf(numElements / world.size()); // Size adjusted for scatter logic + + // Create performance attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Number of runs for performance measurement + const boost::mpi::timer current_timer; // Timer instance + 
perfAttr->current_timer = [&] { return current_timer.elapsed(); }; // Lambda for elapsed time + + // Create and initialize performance results + auto perfResults = std::make_shared(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + + // Run the task + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); // Print performance statistics + // Count orderly violations on the master process + auto temp = testMpiTaskParallel->count_orderly_violations(global_vec); + ASSERT_EQ(out[0], temp); // Validate result + } +} diff --git a/tasks/mpi/korneeva_e_num_of_orderly_violations/src/ops_mpi.cpp b/tasks/mpi/korneeva_e_num_of_orderly_violations/src/ops_mpi.cpp new file mode 100644 index 00000000000..db64d73dba0 --- /dev/null +++ b/tasks/mpi/korneeva_e_num_of_orderly_violations/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "mpi/korneeva_e_num_of_orderly_violations/include/ops_mpi.hpp" diff --git a/tasks/seq/korneeva_e_num_of_orderly_violations/func_tests/main.cpp b/tasks/seq/korneeva_e_num_of_orderly_violations/func_tests/main.cpp new file mode 100644 index 00000000000..389107b11ca --- /dev/null +++ b/tasks/seq/korneeva_e_num_of_orderly_violations/func_tests/main.cpp @@ -0,0 +1,112 @@ +#include + +#include +#include +#include + +#include "seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp" + +// Function to create a test environment +template +std::shared_ptr createTaskData(const std::vector& data) { + auto taskData = std::make_shared(); + taskData->inputs_count.push_back(data.size()); + taskData->inputs.push_back(const_cast(reinterpret_cast(data.data()))); + return taskData; +} + +// General function for preparing data and checking results +template +void runOrderlyViolationsTest(const std::vector& data, int expectedViolations) { + auto taskData = createTaskData(data); + korneeva_e_num_of_orderly_violations_seq::OrderlyViolationsCounter counter(taskData); + ASSERT_EQ(counter.count_orderly_violations(data), expectedViolations); +} + +// Function for tests with random numbers +template +void runRandomTest(int size) { + std::vector numbers(size); + std::random_device randomDevice; + std::default_random_engine randomEngine(randomDevice()); + std::uniform_int_distribution distribution(0, size); + + std::generate(numbers.begin(), numbers.end(), [&distribution, &randomEngine] { return distribution(randomEngine); }); + + auto taskDataPtr = std::make_shared(); + taskDataPtr->inputs.emplace_back(reinterpret_cast(numbers.data())); + taskDataPtr->inputs_count.emplace_back(numbers.size()); + + std::vector output(1); + taskDataPtr->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataPtr->outputs_count.emplace_back(1); + + korneeva_e_num_of_orderly_violations_seq::OrderlyViolationsCounter violationCounter(taskDataPtr); + + ASSERT_EQ(violationCounter.validation(), true); + + violationCounter.pre_processing(); + violationCounter.run(); + violationCounter.post_processing(); + + int result = violationCounter.count_orderly_violations(numbers); + ASSERT_EQ(result, output[0]); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, NoViolations) { runOrderlyViolationsTest({1, 2, 3, 4, 5}, 0); } + +TEST(korneeva_e_num_of_orderly_violations_seq, AllViolations) { runOrderlyViolationsTest({5, 4, 3, 2, 1}, 4); } + +TEST(korneeva_e_num_of_orderly_violations_seq, MixedViolations) { runOrderlyViolationsTest({1, 3, 2, 4, 0}, 2); } + +TEST(korneeva_e_num_of_orderly_violations_seq, SingleElement) 
{ runOrderlyViolationsTest({42}, 0); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Negative_Elements) { + runOrderlyViolationsTest({-1, -2, -3, -4, -5}, 4); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Repeating_Elements) { + runOrderlyViolationsTest({1, 2, 2, 1, 2}, 1); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_0_int) { runOrderlyViolationsTest({}, 0); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_10_int) { runRandomTest(10); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_100_int) { runRandomTest(100); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_1000_int) { runRandomTest(1000); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_10000_int) { runRandomTest(10000); } + +TEST(korneeva_e_num_of_orderly_violations_seq, NoViolations_double) { + runOrderlyViolationsTest({1.1, 2.2, 3.3, 4.4, 5.5}, 0); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, AllViolations_double) { + runOrderlyViolationsTest({5.5, 4.4, 3.3, 2.2, 1.1}, 4); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, MixedViolations_double) { + runOrderlyViolationsTest({1.1, 3.3, 2.2, 4.4, 0.0}, 2); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, SingleElement_double) { runOrderlyViolationsTest({42.0}, 0); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Negative_Elements_double) { + runOrderlyViolationsTest({-1.1, -2.2, -3.3, -4.4, -5.5}, 4); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Repeating_Elements_double) { + runOrderlyViolationsTest({1.1, 2.2, 2.2, 1.1, 2.2}, 1); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_0_double) { runOrderlyViolationsTest({}, 0); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_10_double) { runRandomTest(10); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_100_double) { runRandomTest(100); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_1000_double) { runRandomTest(1000); } + +TEST(korneeva_e_num_of_orderly_violations_seq, Test_Random_10000_double) { runRandomTest(10000); } diff --git a/tasks/seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp b/tasks/seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp new file mode 100644 index 00000000000..dc6e22e4989 --- /dev/null +++ b/tasks/seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp @@ -0,0 +1,73 @@ +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korneeva_e_num_of_orderly_violations_seq { + +template +class OrderlyViolationsCounter : public ppc::core::Task { + public: + explicit OrderlyViolationsCounter(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + cntype count_orderly_violations(const std::vector& data); + + private: + std::vector input_; // Input vector + cntype result_; // Number of violations +}; + +template +bool OrderlyViolationsCounter::pre_processing() { + internal_order_test(); + + int n = taskData->inputs_count[0]; + input_.resize(n); + void* ptr_r = taskData->inputs[0]; + std::copy(static_cast(ptr_r), static_cast(ptr_r) + n, input_.begin()); + + result_ = 0; + return true; +} + +template +bool OrderlyViolationsCounter::validation() { + internal_order_test(); + return (taskData && taskData->inputs_count[0] > 0 && !taskData->inputs.empty() && taskData->outputs_count[0] == 1); +} + +template +bool OrderlyViolationsCounter::run() { + 
internal_order_test(); + result_ = count_orderly_violations(input_); + return true; +} + +template +bool OrderlyViolationsCounter::post_processing() { + internal_order_test(); + if (!taskData || taskData->outputs.empty()) { + return false; + } + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +template +cntype OrderlyViolationsCounter::count_orderly_violations(const std::vector& data) { + cntype count = 0; + for (size_t i = 1; i < data.size(); ++i) { + if (data[i - 1] > data[i]) { + ++count; + } + } + return count; +} +} // namespace korneeva_e_num_of_orderly_violations_seq \ No newline at end of file diff --git a/tasks/seq/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp b/tasks/seq/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp new file mode 100644 index 00000000000..f3e415a14ce --- /dev/null +++ b/tasks/seq/korneeva_e_num_of_orderly_violations/perf_tests/main.cpp @@ -0,0 +1,99 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp" + +TEST(korneeva_e_num_of_orderly_violations_seq, test_pipeline_run) { + const int numElements = 100000000; // Size of input data + + // Generate input and prepare output container + std::vector inputData(numElements); + std::vector outputData(1, 0); + + std::random_device randomDevice; + std::mt19937 generator(randomDevice()); + std::uniform_int_distribution dist(0, numElements); + + std::generate(inputData.begin(), inputData.end(), [&]() { return dist(generator); }); + + // Configure TaskData + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(inputData.data())); + taskData->inputs_count.emplace_back(inputData.size()); + taskData->outputs.emplace_back(reinterpret_cast(outputData.data())); + taskData->outputs_count.emplace_back(outputData.size()); + + // Instantiate OrderlyViolationsCounter Task + auto violationCounterTask = + std::make_shared>(taskData); + + // Set up performance attributes and timer + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto startTime = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&]() { + auto elapsed = std::chrono::high_resolution_clock::now() - startTime; + return std::chrono::duration(elapsed).count(); + }; + + // Initialize performance results container + auto perfResults = std::make_shared(); + + // Run performance analysis + auto perfAnalyzer = std::make_shared(violationCounterTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + // Validate result + int expectedViolations = violationCounterTask->count_orderly_violations(inputData); + ASSERT_EQ(expectedViolations, outputData[0]); +} + +TEST(korneeva_e_num_of_orderly_violations_seq, test_task_run) { + const int dataSize = 100000000; + + // Initialize input data with random integers and prepare output container + std::vector inputData(dataSize); + std::vector outputData(1, 0); + + std::random_device randomDevice; + std::mt19937 engine(randomDevice()); + std::uniform_int_distribution distribution(0, dataSize); + + std::generate(inputData.begin(), inputData.end(), [&]() { return distribution(engine); }); + + // Configure TaskData + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(inputData.data())); + taskData->inputs_count.emplace_back(inputData.size()); + taskData->outputs.emplace_back(reinterpret_cast(outputData.data())); + 
+  taskData->outputs_count.emplace_back(outputData.size());
+
+  // Initialize OrderlyViolationsCounter Task
+  auto violationCounter =
+      std::make_shared<korneeva_e_num_of_orderly_violations_seq::OrderlyViolationsCounter<int, int>>(taskData);
+
+  // Set up performance attributes with a custom timer
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto startTime = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&]() {
+    auto elapsed = std::chrono::high_resolution_clock::now() - startTime;
+    return std::chrono::duration<double>(elapsed).count();
+  };
+
+  // Initialize performance results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Run task and gather performance statistics
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(violationCounter);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  // Validate output
+  int calculatedViolations = violationCounter->count_orderly_violations(inputData);
+  ASSERT_EQ(calculatedViolations, outputData[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/korneeva_e_num_of_orderly_violations/src/ops_seq.cpp b/tasks/seq/korneeva_e_num_of_orderly_violations/src/ops_seq.cpp
new file mode 100644
index 00000000000..9b03fdfb5a9
--- /dev/null
+++ b/tasks/seq/korneeva_e_num_of_orderly_violations/src/ops_seq.cpp
@@ -0,0 +1 @@
+#include "seq/korneeva_e_num_of_orderly_violations/include/ops_seq.hpp"
\ No newline at end of file

From 51bd825a70de9ab801df7792fc41a56f3b732ac9 Mon Sep 17 00:00:00 2001
From: Pybigboy <125855996+Pybigboy@users.noreply.github.com>
Date: Thu, 7 Nov 2024 18:09:58 +0300
Subject: [PATCH 138/155] =?UTF-8?q?=D0=A1=D0=B0=D1=80=D0=B0=D1=84=D0=B0?=
 =?UTF-8?q?=D0=BD=D0=BE=D0=B2=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC.=20?=
 =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?=
 =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=2027.=20=D0=9F=D0=BE=D0=B4=D1=81=D1=87?=
 =?UTF-8?q?=D0=B5=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D0=BD=D0=B5?=
 =?UTF-8?q?=D1=81=D0=BE=D0=B2=D0=BF=D0=B0=D0=B4=D0=B0=D1=8E=D1=89=D0=B8?=
 =?UTF-8?q?=D1=85=20=D1=81=D0=B8=D0=BC=D0=B2=D0=BE=D0=BB=D0=BE=D0=B2=20?=
 =?UTF-8?q?=D0=B4=D0=B2=D1=83=D1=85=20=D1=81=D1=82=D1=80=D0=BE=D0=BA.=20(#?=
 =?UTF-8?q?172)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### General description of the task:
Counting the mismatched characters of two strings means determining the number of positions at which the characters of the two strings differ. The input is two strings of equal length; the result of the algorithm is a single integer, the total number of mismatched characters.

### Description of the sequential task:
The sequential version of the algorithm walks the strings from start to end. For each pair of characters at the same position it checks whether they match; if they do not, a counter is incremented by one. When the pass is complete, the value of the counter is the total number of mismatched characters. (A minimal illustrative sketch of this loop follows below.)

### Description of the MPI task:
In the parallel version of the algorithm, the process with rank 0 splits both strings into blocks. Each process receives its own pair of blocks and counts the mismatched characters in them. When the computation finishes, each process sends its partial result to the process with rank 0, using a `reduce` operation to sum all partial results. The process with rank 0 thus obtains the total number of mismatched characters.
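A minimal sketch of the sequential count described above, for illustration only: the helper name `count_mismatches` is hypothetical, and equal string lengths are assumed (this patch checks that condition in `validation()`).

    // Hypothetical sketch: count the positions at which two
    // equal-length strings hold different characters.
    #include <cstddef>
    #include <string>

    int count_mismatches(const std::string &a, const std::string &b) {
      int mismatches = 0;
      for (std::size_t i = 0; i < a.size(); ++i) {  // a.size() == b.size() assumed
        if (a[i] != b[i]) {
          ++mismatches;
        }
      }
      return mismatches;
    }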
--- .../func_tests/main.cpp | 243 ++++++++++++++++++ .../include/ops_mpi.hpp | 38 +++ .../perf_tests/main.cpp | 91 +++++++ .../src/ops_mpi.cpp | 93 +++++++ .../func_tests/main.cpp | 167 ++++++++++++ .../include/ops_seq.hpp | 22 ++ .../perf_tests/main.cpp | 76 ++++++ .../src/ops_seq.cpp | 30 +++ 8 files changed, 760 insertions(+) create mode 100644 tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp create mode 100644 tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp create mode 100644 tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp create mode 100644 tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_mpi.cpp create mode 100644 tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp create mode 100644 tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp create mode 100644 tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp create mode 100644 tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_seq.cpp diff --git a/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp new file mode 100644 index 00000000000..d096a0f1c2d --- /dev/null +++ b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp @@ -0,0 +1,243 @@ +#include + +#include +#include +#include +#include + +#include "mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp" + +namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi { + +std::string randomString(size_t size) { + const std::string characters = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "0123456789" + "!@#$%^&*()-_=+[]{};:,.<>?"; + + std::string result; + result.reserve(size); + + std::random_device device; + std::mt19937 generator(device()); + std::uniform_int_distribution distribution(0, int(characters.size()) - 1); + + for (size_t i = 0; i < size; ++i) { + result += characters[distribution(generator)]; + } + + return result; +} + +} // namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, with_special_characters_and_different_cases) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(1); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + input_b = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + ASSERT_TRUE(par_task.validation()); + par_task.pre_processing(); + par_task.run(); + par_task.post_processing(); + + if (world.rank() == 0) { + auto seq_task_output = std::vector(1); + auto seq_task_data = std::make_shared(); + 
seq_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + seq_task_data->inputs_count.emplace_back(input_a.size()); + seq_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + seq_task_data->inputs_count.emplace_back(input_b.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_task_output.data())); + seq_task_data->outputs_count.emplace_back(seq_task_output.size()); + + auto seq_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask(seq_task_data); + ASSERT_TRUE(seq_task.validation()); + seq_task.pre_processing(); + seq_task.run(); + seq_task.post_processing(); + + ASSERT_EQ(par_task_output[0], seq_task_output[0]); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, both_strings_are_empty) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(1); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = ""; + input_b = ""; + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + ASSERT_TRUE(par_task.validation()); + par_task.pre_processing(); + par_task.run(); + par_task.post_processing(); + + if (world.rank() == 0) { + auto seq_task_output = std::vector(1); + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + seq_task_data->inputs_count.emplace_back(input_a.size()); + seq_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + seq_task_data->inputs_count.emplace_back(input_b.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_task_output.data())); + seq_task_data->outputs_count.emplace_back(seq_task_output.size()); + + auto seq_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask(seq_task_data); + ASSERT_TRUE(seq_task.validation()); + seq_task.pre_processing(); + seq_task.run(); + seq_task.post_processing(); + + ASSERT_EQ(par_task_output[0], seq_task_output[0]); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, all_matching_characters) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(1); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = std::string(100000, 'a'); + input_b = std::string(100000, 'a'); + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + ASSERT_TRUE(par_task.validation()); + par_task.pre_processing(); + par_task.run(); + par_task.post_processing(); + + if (world.rank() == 0) { + auto seq_task_output = std::vector(1); + auto 
seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + seq_task_data->inputs_count.emplace_back(input_a.size()); + seq_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + seq_task_data->inputs_count.emplace_back(input_b.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_task_output.data())); + seq_task_data->outputs_count.emplace_back(seq_task_output.size()); + + auto seq_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask(seq_task_data); + ASSERT_TRUE(seq_task.validation()); + seq_task.pre_processing(); + seq_task.run(); + seq_task.post_processing(); + + ASSERT_EQ(par_task_output[0], seq_task_output[0]); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, error_when_input_lengths_are_different) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(1); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + input_b = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100001); + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } else { + ASSERT_TRUE(par_task.validation()); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, error_when_output_size_is_not_equal_to_one) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(0); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + input_b = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } else { + ASSERT_TRUE(par_task.validation()); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, error_when_one_string_is_empty) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto par_task_output = std::vector(1); + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = ""; + input_b = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::randomString(100000); + par_task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + par_task_data->inputs_count.emplace_back(input_a.size()); + 
par_task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + par_task_data->inputs_count.emplace_back(input_b.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_task_output.data())); + par_task_data->outputs_count.emplace_back(par_task_output.size()); + } + + auto par_task = sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask(par_task_data); + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } else { + ASSERT_TRUE(par_task.validation()); + } +} diff --git a/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp new file mode 100644 index 00000000000..58b5c2922ae --- /dev/null +++ b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi { + +class SequentialTask : public ppc::core::Task { + public: + explicit SequentialTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_a_, input_b_; + int result_{}; +}; + +class ParallelTask : public ppc::core::Task { + public: + explicit ParallelTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_a_, input_b_; + int result_{}; + + boost::mpi::communicator world; +}; + +} // namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi diff --git a/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..3f0933621a2 --- /dev/null +++ b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp" + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto output = std::vector(1); + auto expected = 0; + + auto task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = std::string(5000000, 'a'); + input_b = std::string(5000000, 'a'); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + } + + auto task = std::make_shared(task_data); + ASSERT_TRUE(task->validation()); + task->pre_processing(); + task->run(); + task->post_processing(); + + auto perf_attr = std::make_shared(); + perf_attr->num_running = 10; + const boost::mpi::timer current_timer; + perf_attr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_results = std::make_shared(); + + auto perf_analyzer = std::make_shared(task); + perf_analyzer->pipeline_run(perf_attr, perf_results); + + if 
(world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(expected, output[0]); + } +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi, test_task_run) { + boost::mpi::communicator world; + std::string input_a; + std::string input_b; + + auto output = std::vector(1); + auto expected = 0; + + auto task_data = std::make_shared(); + if (world.rank() == 0) { + input_a = std::string(5000000, 'a'); + input_b = std::string(5000000, 'a'); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + } + + auto task = std::make_shared(task_data); + ASSERT_TRUE(task->validation()); + task->pre_processing(); + task->run(); + task->post_processing(); + + auto perf_attr = std::make_shared(); + perf_attr->num_running = 10; + const boost::mpi::timer current_timer; + perf_attr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_results = std::make_shared(); + + auto perf_analyzer = std::make_shared(task); + perf_analyzer->task_run(perf_attr, perf_results); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(expected, output[0]); + } +} diff --git a/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_mpi.cpp b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_mpi.cpp new file mode 100644 index 00000000000..9851d84c8c2 --- /dev/null +++ b/tasks/mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_mpi.cpp @@ -0,0 +1,93 @@ +#include "mpi/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_mpi.hpp" + +#include +#include + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask::validation() { + internal_order_test(); + return taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs_count[0] == 1; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask::pre_processing() { + internal_order_test(); + input_a_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + input_b_.assign(reinterpret_cast(taskData->inputs[1]), taskData->inputs_count[1]); + result_ = 0; + return true; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask::run() { + internal_order_test(); + for (size_t i = 0; i < input_a_.size(); ++i) { + if (input_a_[i] != input_b_[i]) { + result_++; + } + } + return true; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::SequentialTask::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs_count[0] == 1; + } + return true; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + input_a_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + input_b_.assign(reinterpret_cast(taskData->inputs[1]), taskData->inputs_count[1]); + result_ = 0; + } + return true; +} + +bool 
sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask::run() { + internal_order_test(); + std::string local_input_a; + std::string local_input_b; + if (world.rank() == 0) { + auto base_size = input_a_.size() / world.size(); + auto remainder = input_a_.size() % world.size(); + local_input_a = input_a_.substr(0, base_size); + local_input_b = input_b_.substr(0, base_size); + + auto start = base_size; + for (int p = 1; p < world.size(); ++p) { + auto size = base_size + (p <= int(remainder) ? 1 : 0); + + world.send(p, 0, input_a_.substr(start, size)); + world.send(p, 0, input_b_.substr(start, size)); + + start += size; + } + } else { + world.recv(0, 0, local_input_a); + world.recv(0, 0, local_input_b); + } + + auto local_result = 0; + for (size_t i = 0; i < local_input_a.size(); ++i) { + if (local_input_a[i] != local_input_b[i]) { + local_result++; + } + } + boost::mpi::reduce(world, local_result, result_, std::plus(), 0); + return true; +} + +bool sarafanov_m_num_of_mismatch_characters_of_two_strings_mpi::ParallelTask::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = result_; + } + return true; +} diff --git a/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp new file mode 100644 index 00000000000..6597c81c2ba --- /dev/null +++ b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/func_tests/main.cpp @@ -0,0 +1,167 @@ +#include + +#include +#include + +#include "seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp" + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, typical_scenario) { + auto input_a = std::string("abcdefg"); + auto input_b = std::string("abcxyzg"); + auto output = std::vector(1); + auto expected = 3; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(expected, output[0]); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, with_special_characters_and_different_cases) { + auto input_a = std::string("abc!@#123DEFghijklmn"); + auto input_b = std::string("abc$%^456XYZghijklMn"); + auto output = std::vector(1); + auto expected = 10; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(expected, output[0]); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, 
both_strings_are_empty) { + auto input_a = std::string(""); + auto input_b = std::string(""); + auto output = std::vector(1); + auto expected = 0; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(expected, output[0]); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, all_matching_characters) { + auto input_a = std::string("aaaaaaaaaa"); + auto input_b = std::string("aaaaaaaaaa"); + auto output = std::vector(1); + auto expected = 0; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(expected, output[0]); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, all_different_characters) { + auto input_a = std::string("abcdefghij"); + auto input_b = std::string("klmnopqrst"); + auto output = std::vector(1); + auto expected = 10; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(expected, output[0]); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, error_when_input_lengths_are_different) { + auto input_a = std::string("abcdefg"); + auto input_b = std::string("abc"); + auto output = std::vector(1); + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_FALSE(task.validation()); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, error_when_output_size_is_not_equal_to_one) { + auto input_a = std::string("abcdefg"); + auto input_b = std::string("abcxyzg"); + auto output = 
std::vector(0); + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_FALSE(task.validation()); +} + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, error_when_one_string_is_empty) { + auto input_a = std::string("abcdefg"); + auto input_b = std::string(""); + auto output = std::vector(1); + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask(task_data); + ASSERT_FALSE(task.validation()); +} diff --git a/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp new file mode 100644 index 00000000000..0d10ee8cac8 --- /dev/null +++ b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_seq { + +class SequentialTask : public ppc::core::Task { + public: + explicit SequentialTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_a_, input_b_; + int result_{}; +}; + +} // namespace sarafanov_m_num_of_mismatch_characters_of_two_strings_seq diff --git a/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..c35ecc85ea7 --- /dev/null +++ b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/perf_tests/main.cpp @@ -0,0 +1,76 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp" + +TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, test_pipeline_run) { + auto input_a = std::string(5000000, 'a'); + auto input_b = std::string(5000000, 'a'); + auto output = std::vector(1); + auto expected = 0; + + auto task_data = std::make_shared(); + task_data->inputs.emplace_back(reinterpret_cast(input_a.data())); + task_data->inputs_count.emplace_back(input_a.size()); + task_data->inputs.emplace_back(reinterpret_cast(input_b.data())); + task_data->inputs_count.emplace_back(input_b.size()); + task_data->outputs.emplace_back(reinterpret_cast(output.data())); + task_data->outputs_count.emplace_back(output.size()); + + auto task = std::make_shared(task_data); + + auto perf_attr = std::make_shared(); + perf_attr->num_running = 10; + const auto t0 = 
+  perf_attr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perf_results = std::make_shared<ppc::core::PerfResults>();
+
+  auto perf_analyzer = std::make_shared<ppc::core::Perf>(task);
+  perf_analyzer->pipeline_run(perf_attr, perf_results);
+
+  ppc::core::Perf::print_perf_statistic(perf_results);
+  ASSERT_EQ(expected, output[0]);
+}
+
+TEST(sarafanov_m_num_of_mismatch_characters_of_two_strings_seq, test_task_run) {
+  auto input_a = std::string(5000000, 'a');
+  auto input_b = std::string(5000000, 'a');
+  auto output = std::vector<int>(1);
+  auto expected = 0;
+
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_a.data()));
+  task_data->inputs_count.emplace_back(input_a.size());
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_b.data()));
+  task_data->inputs_count.emplace_back(input_b.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  auto task = std::make_shared<sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask>(task_data);
+
+  auto perf_attr = std::make_shared<ppc::core::PerfAttr>();
+  perf_attr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perf_attr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perf_results = std::make_shared<ppc::core::PerfResults>();
+
+  auto perf_analyzer = std::make_shared<ppc::core::Perf>(task);
+  perf_analyzer->task_run(perf_attr, perf_results);
+
+  ppc::core::Perf::print_perf_statistic(perf_results);
+  ASSERT_EQ(expected, output[0]);
+}
diff --git a/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_seq.cpp b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_seq.cpp
new file mode 100644
index 00000000000..0a297f00190
--- /dev/null
+++ b/tasks/seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/src/ops_seq.cpp
@@ -0,0 +1,30 @@
+#include "seq/sarafanov_m_num_of_mismatch_characters_of_two_strings/include/ops_seq.hpp"
+
+bool sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == taskData->inputs_count[1] && taskData->outputs_count[0] == 1;
+}
+
+bool sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask::pre_processing() {
+  internal_order_test();
+  input_a_.assign(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+  input_b_.assign(reinterpret_cast<char *>(taskData->inputs[1]), taskData->inputs_count[1]);
+  result_ = 0;
+  return true;
+}
+
+bool sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask::run() {
+  internal_order_test();
+  // Count positions where the two equal-length strings differ.
+  for (size_t i = 0; i < input_a_.size(); ++i) {
+    if (input_a_[i] != input_b_[i]) {
+      result_++;
+    }
+  }
+  return true;
+}
+
+bool sarafanov_m_num_of_mismatch_characters_of_two_strings_seq::SequentialTask::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int *>(taskData->outputs[0]) = result_;
+  return true;
+}
From a310c5854fa5213c53b4353912d45cd03c70788f Mon Sep 17 00:00:00 2001
From: EgorDormidontov <113050823+EgorDormidontov@users.noreply.github.com>
Date: Thu, 7 Nov 2024 18:12:20 +0300
Subject: [PATCH 139/155] Dormidontov Egor. Task 1. Variant 18. Finding the minimum values in the columns of a matrix. (#200)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Program description.
The program computes the minimum value in each column of a matrix.
1. Seq task: walk over every column, find its minimum, and append it to the result vector.
2. MPI task:
- the matrix is split between the processes
- each process finds the minimum of every column within its rows
- each process sends its result to the root process
- the root combines them
- the result is the vector of per-column minima
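An illustrative, self-contained sketch of the scheme above, added in editing (it is
not part of the submitted patch): the file name, the demo data, and the plain
broadcast/reduce pair are all assumptions of the example. For brevity it broadcasts
the whole matrix to every rank, whereas the patch itself sends each rank only its
block of rows.

    // column_min_sketch.cpp -- per-column minima with Boost.MPI (editor's demo).
    #include <algorithm>
    #include <boost/mpi.hpp>
    #include <climits>
    #include <iostream>
    #include <vector>

    int main(int argc, char** argv) {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;
      const int rows = 8;
      const int cols = 4;
      std::vector<int> flat(rows * cols);  // row-major matrix
      if (world.rank() == 0) {
        for (int k = 0; k < rows * cols; ++k) flat[k] = (k * 37) % 101 - 50;  // demo data
      }
      boost::mpi::broadcast(world, flat, 0);  // simplification: every rank gets all rows

      // Block row range of this rank; the first (rows % size) ranks take one extra row.
      int base = rows / world.size();
      int extra = rows % world.size();
      int first = world.rank() * base + std::min(world.rank(), extra);
      int count = base + (world.rank() < extra ? 1 : 0);

      // Per-column minima over this rank's rows only.
      std::vector<int> local_min(cols, INT_MAX);
      for (int i = first; i < first + count; ++i)
        for (int j = 0; j < cols; ++j) local_min[j] = std::min(local_min[j], flat[i * cols + j]);

      // Element-wise minimum of all local vectors, collected on the root.
      std::vector<int> global_min(cols);
      boost::mpi::reduce(world, local_min.data(), cols, global_min.data(),
                         boost::mpi::minimum<int>(), 0);

      if (world.rank() == 0)
        for (int j = 0; j < cols; ++j) std::cout << "min(col " << j << ") = " << global_min[j] << '\n';
      return 0;
    }

Run with, e.g., "mpiexec -n 4 ./column_min_sketch": every rank computes minima for
its row block, and the reduce folds them into a single vector on rank 0.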
---
 .../func_tests/main.cpp                       | 210 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  47 ++++
 .../perf_tests/main.cpp                       |  92 ++++++++
 .../src/ops_mpi.cpp                           | 137 ++++++++++++
 .../func_tests/main.cpp                       | 189 ++++++++++++++++
 .../include/ops_seq.hpp                       |  25 +++
 .../perf_tests/main.cpp                       |  94 ++++++++
 .../src/ops_seq.cpp                           |  48 ++++
 8 files changed, 842 insertions(+)
 create mode 100644 tasks/mpi/dormidontov_e_min_value_by_columns_mpi/func_tests/main.cpp
 create mode 100644 tasks/mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/dormidontov_e_min_value_by_columns_mpi/perf_tests/main.cpp
 create mode 100644 tasks/mpi/dormidontov_e_min_value_by_columns_mpi/src/ops_mpi.cpp
 create mode 100644 tasks/seq/dormidontov_e_min_value_by_columns_seq/func_tests/main.cpp
 create mode 100644 tasks/seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp
 create mode 100644 tasks/seq/dormidontov_e_min_value_by_columns_seq/perf_tests/main.cpp
 create mode 100644 tasks/seq/dormidontov_e_min_value_by_columns_seq/src/ops_seq.cpp
diff --git a/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/func_tests/main.cpp b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/func_tests/main.cpp
new file mode 100644
index 00000000000..229fce3ad77
--- /dev/null
+++ b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/func_tests/main.cpp
@@ -0,0 +1,210 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp"
+boost::mpi::communicator world;
+
+inline std::vector<int> generate_random_vector(int cs_temp, int rs_temp) {
+  // Flattened rs_temp x cs_temp matrix; the first row is fixed to zeros.
+  std::vector<int> temp(cs_temp * rs_temp);
+  for (int i = 0; i < rs_temp; i++) {
+    for (int j = 0; j < cs_temp; j++) {
+      if (i == 0) {
+        temp[i * cs_temp + j] = 0;
+      } else {
+        temp[i * cs_temp + j] = (rand() % 1999) - 999;
+      }
+    }
+  }
+  return temp;
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, Test_just_test_if_it_finally_works) {
+  int rs = 7;
+  int cs = 7;
+
+  std::vector<int> matrix(cs * rs);
+  matrix = generate_random_vector(cs, rs);
+  std::vector<int> res_out_paral(cs, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+
+  testMpiTaskParallel.pre_processing();
+
+  testMpiTaskParallel.run();
+
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> res_out_seq(cs, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(rs);
+    taskDataSeq->inputs_count.emplace_back(cs);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(res_out_seq.size());
+    dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(res_out_paral, res_out_seq);
+  }
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, Test_just_test_if_it_finally_works2) {
+  int rs = 2;
+  int cs = 2;
+
+  std::vector<int> matrix(cs * rs);
+  matrix = generate_random_vector(cs, rs);
+  std::vector<int> res_out_paral(cs, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> res_out_seq(cs, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(rs);
+    taskDataSeq->inputs_count.emplace_back(cs);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(res_out_seq.size());
+    dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+    ASSERT_EQ(res_out_paral, res_out_seq);
+  }
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, Test_Empty) {
+  const int rs = 0;
+  const int cs = 0;
+
+  std::vector<int> matrix = {};
+  std::vector<int> res_out_paral(cs, 0);
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+    dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+    ASSERT_EQ(testMpiTaskParallel.validation(), false);
+  }
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, Test_just_test_if_it_finally_works5) {
+  int rs = 2000;
+  int cs = 2000;
+
+  std::vector<int> matrix(cs * rs);
+  matrix = generate_random_vector(cs, rs);
+  std::vector<int> res_out_paral(cs);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> res_out_seq(cs, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(rs);
+    taskDataSeq->inputs_count.emplace_back(cs);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(res_out_seq.size());
+    dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(res_out_paral, res_out_seq);
+  }
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, Test_just_test_if_it_finally_works6) {
+  int rs = 20;
+  int cs = 30;
+
+  std::vector<int> matrix(cs * rs);
+  matrix = generate_random_vector(cs, rs);
+  std::vector<int> res_out_paral(cs);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+
+  testMpiTaskParallel.post_processing();
+  if (world.rank() == 0) {
+    std::vector<int> res_out_seq(cs, 0);
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(rs);
+    taskDataSeq->inputs_count.emplace_back(cs);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(res_out_seq.size());
+    dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(res_out_paral, res_out_seq);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp
new file mode 100644
index 00000000000..ad9aa80ec55
--- /dev/null
+++ b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp
@@ -0,0 +1,47 @@
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace dormidontov_e_min_value_by_columns_mpi {
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  int cs{};
+  int rs{};
+  std::vector<std::vector<int>> input_;
+  std::vector<int> res_{};
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  int cs{};
+  int rs{};
+  std::vector<int> input_;
+  std::vector<int> minput_;
+  std::vector<int> res_{};
+  boost::mpi::communicator world;
+};
+}  // namespace dormidontov_e_min_value_by_columns_mpi
\ No newline at end of file
diff --git a/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/perf_tests/main.cpp b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/perf_tests/main.cpp
new file mode 100644
index 00000000000..f568e539b2c
--- /dev/null
+++ b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/perf_tests/main.cpp
@@ -0,0 +1,92 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp"
+boost::mpi::communicator world;
+
+TEST(dormidontov_e_min_value_by_columns_mpi, test_pipeline_run) {
+  int rs = 1984;
+  int cs = 1984;
+
+  std::vector<int> matrix(cs * rs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * cs + j;
+    }
+  }
+  std::vector<int> res_out_paral(cs, 0);
+  std::vector<int> exp_res_paral(cs, 0);
+  for (int j = 0; j < cs; ++j) {
+    exp_res_paral[j] = j;
+  }
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(res_out_paral, exp_res_paral);
+  }
+}
+
+TEST(dormidontov_e_min_value_by_columns_mpi, test_task_run) {
+  int rs = 1984;
+  int cs = 1984;
+
+  std::vector<int> matrix(cs * rs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * cs + j;
+    }
+  }
+  std::vector<int> res_out_paral(cs, 0);
+  std::vector<int> exp_res_paral(cs, 0);
+  for (int j = 0; j < cs; ++j) {
+    exp_res_paral[j] = j;
+  }
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rs);
+    taskDataPar->inputs_count.emplace_back(cs);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out_paral.data()));
+    taskDataPar->outputs_count.emplace_back(res_out_paral.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(res_out_paral, exp_res_paral);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/src/ops_mpi.cpp b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/src/ops_mpi.cpp
new file mode 100644
index 00000000000..6741ac450e8
--- /dev/null
+++ b/tasks/mpi/dormidontov_e_min_value_by_columns_mpi/src/ops_mpi.cpp
@@ -0,0 +1,137 @@
+#include "mpi/dormidontov_e_min_value_by_columns_mpi/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <boost/mpi/collectives.hpp>
+#include <climits>
+#include <functional>
+#include <random>
+#include <string>
+#include <vector>
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+
+  rs = taskData->inputs_count[0];
+  cs = taskData->inputs_count[1];
+
+  input_.resize(rs, std::vector<int>(cs));
+
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      input_[i][j] = reinterpret_cast<int *>(taskData->inputs[0])[i * cs + j];
+    }
+  }
+
+  res_.resize(cs, 0);
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return ((taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+          taskData->inputs_count[1] == taskData->outputs_count[0]);
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (int j = 0; j < cs; ++j) {
+    res_[j] = INT_MAX;
+    for (int i = 0; i < rs; ++i) {
+      if (res_[j] > input_[i][j]) {
+        res_[j] = input_[i][j];
+      }
+    }
+  }
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cs; i++) {
+    reinterpret_cast<int *>(taskData->outputs[0])[i] = res_[i];
+  }
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    rs = taskData->inputs_count[0];
+    cs = taskData->inputs_count[1];
+    input_.resize(rs * cs);
+    std::copy(reinterpret_cast<int *>(taskData->inputs[0]), reinterpret_cast<int *>(taskData->inputs[0]) + rs * cs,
+              input_.begin());
+  }
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return (((taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+             taskData->inputs_count[1] == taskData->outputs_count[0]));
+  }
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    rs = taskData->inputs_count[0];
+    cs = taskData->inputs_count[1];
+  }
+  boost::mpi::broadcast(world, rs, 0);
+  boost::mpi::broadcast(world, cs, 0);
+
+  int unfitrs = rs % world.size();   // leftover rows that do not divide evenly
+  int rsperpro = rs / world.size();  // base number of rows per process
+  int locrs;                         // rows handled by this process
+  int prs;                           // rows handled by peer i
+  int a;                             // send offset into the flattened matrix
+  if (world.rank() < unfitrs) {
+    locrs = rsperpro + 1;
+  } else {
+    locrs = rsperpro;
+  }
+  std::vector<int> locmin(cs, INT_MAX);
+
+  minput_.resize(cs * locrs);
+
+  // Rank 0 keeps its own block of rows and sends every other rank its block.
+  if (world.rank() == 0) {
+    a = locrs * cs;
+    for (int i = 1; i < world.size(); i++) {
+      if (i < unfitrs) {
+        prs = rsperpro + 1;
+      } else {
+        prs = rsperpro;
+      }
+      world.send(i, 2, input_.data() + a, prs * cs);
+      a += cs * prs;
+    }
+    std::copy(input_.begin(), input_.begin() + locrs * cs, minput_.begin());
+  } else {
+    world.recv(0, 2, minput_.data(), locrs * cs);
+  }
+
+  // Column minima over the local block of rows.
+  for (int i = 0; i < locrs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      if (locmin[j] > minput_[i * cs + j]) {
+        locmin[j] = minput_[i * cs + j];
+      }
+    }
+  }
+  res_.resize(cs, 0);
+  // Element-wise minimum of all local results, collected on rank 0.
+  boost::mpi::reduce(world, locmin.data(), cs, res_.data(), boost::mpi::minimum<int>(), 0);
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    for (int i = 0; i < cs; i++) {
+      reinterpret_cast<int *>(taskData->outputs[0])[i] = res_[i];
+    }
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/dormidontov_e_min_value_by_columns_seq/func_tests/main.cpp b/tasks/seq/dormidontov_e_min_value_by_columns_seq/func_tests/main.cpp
new file mode 100644
index 00000000000..3a4574a2aad
--- /dev/null
+++ b/tasks/seq/dormidontov_e_min_value_by_columns_seq/func_tests/main.cpp
@@ -0,0 +1,189 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp"
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_3x3) {
+  int rs = 3;
+  int cs = 3;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix = {11, 55, 33, 77, 99, 1010, 1111, 1212, 13};
+  std::vector<int> res_out = {0, 0, 0};
+  std::vector<int> exp_res = {11, 55, 13};
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_5x5) {
+  int rs = 5;
+  int cs = 5;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix = {8,     5,     7,     8,     15,    17,    18,    1,     9,     10,    111,   127,   1388,
+                             154,   1589,  1615,  1754,  18548, 1948,  2077,  21515, 22651, 23455, 24445, 25545};
+  std::vector<int> res_out = {0, 0, 0, 0, 0};
+  std::vector<int> exp_res = {8, 5, 1, 8, 10};
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_2x5) {
+  int rs = 2;
+  int cs = 5;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix = {0, 0, 0, 22, 33, 44, 55, 66, 0, 0};
+  std::vector<int> res_out = {0, 0, 0, 0, 0};
+  std::vector<int> exp_res = {0, 0, 0, 0, 0};
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_7x1) {
+  int rs = 7;
+  int cs = 1;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix = {11, 22, 33, 44, 55, 66, 77};
+  std::vector<int> res_out = {0};
+  std::vector<int> exp_res = {11};
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_1x5) {
+  int rs = 1;
+  int cs = 5;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix = {11, 22, 33, 44, 55};
+  std::vector<int> res_out = {0, 0, 0, 0, 0};
+  std::vector<int> exp_res = {11, 22, 33, 44, 55};
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_3000x3000) {
+  int rs = 3000;
+  int cs = 3000;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix(rs * cs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * 1000 + j;
+    }
+  }
+  std::vector<int> res_out(cs, 0);
+  std::vector<int> exp_res(cs);
+  for (int j = 0; j < cs; ++j) {
+    exp_res[j] = j;
+  }
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_min_values_by_columns_matrix_1500x3000) {
+  int rs = 1500;
+  int cs = 3000;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  dormidontov_e_min_value_by_columns_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<int> matrix(rs * cs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * 1000 + j;
+    }
+  }
+  std::vector<int> res_out(cs, 0);
+  std::vector<int> exp_res(cs);
+  for (int j = 0; j < cs; ++j) {
+    exp_res[j] = j;
+  }
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+  ASSERT_EQ(res_out, exp_res);
+}
\ No newline at end of file
diff --git a/tasks/seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp b/tasks/seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp
new file mode 100644
index 00000000000..fce439d92b4
--- /dev/null
+++ b/tasks/seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace dormidontov_e_min_value_by_columns_seq {
+
+class TestTaskSequential : public ppc::core::Task {
+ public:
+  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData) : Task(std::move(taskData)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  int rs;
+  int cs;
+  std::vector<std::vector<int>> input_;
+  std::vector<int> res;
+};
+
+}  // namespace dormidontov_e_min_value_by_columns_seq
\ No newline at end of file
diff --git a/tasks/seq/dormidontov_e_min_value_by_columns_seq/perf_tests/main.cpp b/tasks/seq/dormidontov_e_min_value_by_columns_seq/perf_tests/main.cpp
new file mode 100644
index 00000000000..f2b55e2dbd1
--- /dev/null
+++ b/tasks/seq/dormidontov_e_min_value_by_columns_seq/perf_tests/main.cpp
@@ -0,0 +1,94 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp"
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_pipeline_run) {
+  int rs = 3000;
+  int cs = 3000;
+  std::vector<int> matrix(rs * cs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * 1000 + j;
+    }
+  }
+
+  std::vector<int> res_out(cs, 0);
+  std::vector<int> exp_res(cs);
+  for (int j = 0; j < cs; ++j) {
+    exp_res[j] = j;
+  }
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  // Create Task
+  auto testTaskSequential = std::make_shared<dormidontov_e_min_value_by_columns_seq::TestTaskSequential>(taskDataSeq);
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(res_out, exp_res);
+}
+
+TEST(dormidontov_e_min_value_by_columns_seq, test_task_run) {
+  int rs = 3000;
+  int cs = 3000;
+  std::vector<int> matrix(rs * cs);
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      matrix[i * cs + j] = i * 1000 + j;
+    }
+  }
+  std::vector<int> res_out(cs, 0);
+  std::vector<int> exp_res(cs);
+  for (int j = 0; j < cs; ++j) {
+    exp_res[j] = j;
+  }
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  auto testTaskSequential = std::make_shared<dormidontov_e_min_value_by_columns_seq::TestTaskSequential>(taskDataSeq);
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(rs);
+  taskDataSeq->inputs_count.emplace_back(cs);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(res_out.data()));
+  taskDataSeq->outputs_count.emplace_back(res_out.size());
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(res_out, exp_res);
+}
\ No newline at end of file
diff --git a/tasks/seq/dormidontov_e_min_value_by_columns_seq/src/ops_seq.cpp b/tasks/seq/dormidontov_e_min_value_by_columns_seq/src/ops_seq.cpp
new file mode 100644
index 00000000000..9e57c89ea7b
--- /dev/null
+++ b/tasks/seq/dormidontov_e_min_value_by_columns_seq/src/ops_seq.cpp
@@ -0,0 +1,48 @@
+#include "seq/dormidontov_e_min_value_by_columns_seq/include/ops_seq.hpp"
+
+#include <climits>
+#include <vector>
+
+bool dormidontov_e_min_value_by_columns_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+
+  rs = taskData->inputs_count[0];
+  cs = taskData->inputs_count[1];
+
+  input_.resize(rs, std::vector<int>(cs));
+
+  for (int i = 0; i < rs; ++i) {
+    for (int j = 0; j < cs; ++j) {
+      input_[i][j] = reinterpret_cast<int *>(taskData->inputs[0])[i * cs + j];
+    }
+  }
+
+  res.resize(cs, 0);
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return ((taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+          taskData->inputs_count[1] == taskData->outputs_count[0]);
+}
+
+bool dormidontov_e_min_value_by_columns_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (int j = 0; j < cs; ++j) {
+    res[j] = INT_MAX;
+    for (int i = 0; i < rs; ++i) {
+      if (res[j] > input_[i][j]) {
+        res[j] = input_[i][j];
+      }
+    }
+  }
+  return true;
+}
+
+bool dormidontov_e_min_value_by_columns_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cs; i++) {
+    reinterpret_cast<int *>(taskData->outputs[0])[i] = res[i];
+  }
+  return true;
+}
From 215e9b52b3bbd3c929c4fb3481fdf0d840a3056b Mon Sep 17 00:00:00 2001
From: Nesterov Alexander
Date: Thu, 7 Nov 2024 16:16:21 +0100
Subject: [PATCH 140/155] Revert "Sedova Olga. Task 1. Variant 13. Maximum value of matrix elements." (#248)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#112
@Sedova-Olga
https://github.com/learning-process/ppc-2024-autumn/actions/runs/11722921068/job/32653481692
---
 .../func_tests/main.cpp                       | 460 ------------------
 .../include/ops_mpi.hpp                       |  38 --
 .../perf_tests/main.cpp                       | 134 -----
 .../src/ops_mpi.cpp                           | 110 -----
 .../func_tests/main.cpp                       | 109 -----
 .../include/ops_seq.hpp                       |  23 -
 .../perf_tests/main.cpp                       | 156 ------
 .../src/ops_seq.cpp                           |  44 --
 8 files changed, 1074 deletions(-)
 delete mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp
 delete mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp
 delete mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp
 delete mode 100644 tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp
 delete mode 100644 tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp
 delete mode 100644 tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp
 delete mode 100644 tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp
 delete mode 100644 tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp
diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp
deleted file mode 100644
index b6518018d35..00000000000
--- a/tasks/mpi/sedova_o_max_of_vector_elements/func_tests/main.cpp
+++ /dev/null
@@ -1,460 +0,0 @@
-#include <gtest/gtest.h>
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <random>
-#include <vector>
-
-#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp"
-
-namespace sedova_o_max_of_vector_elements_mpi_test {
-
-std::vector<int> generate_random_vector(size_t size, size_t value) {
-  std::random_device dev;
-  std::mt19937 random(dev());
-  std::vector<int> vec(size);
-  for (size_t i = 0; i < size; i++) {
-    vec[i] = random() % (value + 1);
-  }
-  return vec;
-}
-
-std::vector<std::vector<int>> generate_random_matrix(size_t rows, size_t cols, size_t value) {
-  std::vector<std::vector<int>> matrix(rows);
-  for (size_t i = 0; i < rows; i++) {
-    matrix[i] = generate_random_vector(cols, value);
-  }
-  return matrix;
-}
-}  // namespace sedova_o_max_of_vector_elements_mpi_test
-
-TEST(sedova_o_max_of_vector_elements_mpi, Test1) {
-  ASSERT_NO_THROW(sedova_o_max_of_vector_elements_mpi_test::generate_random_vector(10, 10));
-}
-TEST(sedova_o_max_of_vector_elements_mpi, Test2) {
-  ASSERT_NO_THROW(sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(10, 10, 10));
-}
-TEST(sedova_o_max_of_vector_elements_mpi, Test3) {
-  boost::mpi::communicator world;
-  std::vector<std::vector<int>> global_matrix;
-  std::vector<int> global_max(1, -30);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
-  if (world.rank() == 0) {
-    global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(1, 1, 20);
-    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
-    taskDataPar->inputs_count.emplace_back(1);
-
taskDataPar->inputs_count.emplace_back(1); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_1_5) { - size_t rows = 1; - size_t cols = 5; - size_t value = 20; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -(int)value); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - testMpiTaskParallel.validation(); - EXPECT_EQ(testMpiTaskParallel.pre_processing(), true); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_10_10) { - size_t rows = 10; - size_t cols = 10; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_100_100) { - size_t rows = 100; - size_t cols = 100; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr 
taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_1000_1000) { - size_t rows = 1000; - size_t cols = 1000; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - 
testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_10_100) { - size_t rows = 10; - size_t cols = 100; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_100_10) { - size_t rows = 100; - size_t cols = 10; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_500_10) { - size_t rows = 10; - size_t cols = 500; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_50_2) { - size_t rows = 2; - size_t cols = 50; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - 
std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi, Test_10_2) { - size_t rows = 2; - size_t cols = 10; - size_t value = 30; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -((int)(value))); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, global_matrix[0][0]); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(rows); - taskDataSeq->inputs_count.emplace_back(cols); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp deleted file mode 100644 index c68866ccca6..00000000000 --- a/tasks/mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,38 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace sedova_o_max_of_vector_elements_mpi { -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - 
private: - int res_{}; - std::vector> input_; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - boost::mpi::communicator world; - std::vector input_, loc_input_; - int res_{}; -}; -} // namespace sedova_o_max_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index 7819c68ea72..00000000000 --- a/tasks/mpi/sedova_o_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,134 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp" - -namespace sedova_o_max_of_vector_elements_mpi_test { - -std::vector generate_random_vector(size_t size, size_t value) { - std::random_device dev; - std::mt19937 random(dev()); - std::vector vec(size); - for (size_t i = 0; i < size; i++) { - vec[i] = random() % (value + 1); - } - return vec; -} -std::vector> generate_random_matrix(size_t rows, size_t cols, size_t value) { - std::vector> matrix(rows); - for (size_t i = 0; i < rows; i++) { - matrix[i] = generate_random_vector(cols, value); - } - return matrix; -} -} // namespace sedova_o_max_of_vector_elements_mpi_test - -TEST(sedova_o_max_of_vector_elements_mpi1, test_pipeline_run) { - size_t rows = 7000; - size_t cols = 7000; - int value = 7000; - - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -(int)value); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 random(dev()); - - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - rows = random() % rows; - cols = random() % cols; - global_matrix[rows][cols] = value; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(value, global_max[0]); - } -} - -TEST(sedova_o_max_of_vector_elements_mpi1, test_task_run) { - size_t rows = 7000; - size_t cols = 7000; - int value = 7000; - - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, -(int)value); - // Create TaskData - std::shared_ptr 
taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 random(dev()); - - global_matrix = sedova_o_max_of_vector_elements_mpi_test::generate_random_matrix(rows, cols, value); - rows = random() % rows; - cols = random() % cols; - global_matrix[rows][cols] = value; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(rows); - taskDataPar->inputs_count.emplace_back(cols); - - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(value, global_max[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp deleted file mode 100644 index 4fd9f4d5f30..00000000000 --- a/tasks/mpi/sedova_o_max_of_vector_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2024 Sedova Olga -#include "mpi/sedova_o_max_of_vector_elements/include/ops_mpi.hpp" - -#include - -#include - -int find_max_of_matrix(std::vector &matrix) { - if (matrix.empty()) { - return std::numeric_limits::min(); - } - auto max_it = std::max_element(matrix.begin(), matrix.end()); - return *max_it; -} - -bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - unsigned int rows = taskData->inputs_count[0]; - unsigned int cols = taskData->inputs_count[1]; - input_ = std::vector>(rows, std::vector(cols)); - for (unsigned int i = 0; i < rows; i++) { - auto *tmp_ptr = reinterpret_cast(taskData->inputs[i]); - std::copy(tmp_ptr, tmp_ptr + cols, input_[i].begin()); - } - res_ = INT_MIN; - return true; -} - -bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] >= 1 && taskData->inputs_count[1] >= 1 && taskData->outputs_count[0] == 1; -} - -bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::run() { - internal_order_test(); - std::vector local_(input_.size()); - for (unsigned int i = 0; i < input_.size(); i++) { - local_[i] = *std::max_element(input_[i].begin(), input_[i].end()); - } - res_ = *std::max_element(local_.begin(), local_.end()); - return true; -} - -bool sedova_o_max_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res_; - return true; -} - -bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - if (world.rank() == 0) { - unsigned int rows = taskData->inputs_count[0]; - unsigned int cols = taskData->inputs_count[1]; - input_ = 
std::vector<int>(rows * cols);
-    for (unsigned int i = 0; i < rows; i++) {
-      auto *input_data = reinterpret_cast<int *>(taskData->inputs[i]);
-      for (unsigned int j = 0; j < cols; j++) {
-        input_[i * cols + j] = input_data[j];
-      }
-    }
-  }
-  return true;
-}
-
-bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::validation() {
-  internal_order_test();
-  return (world.rank() != 0) || ((taskData->outputs_count[0] == 1) && (!taskData->inputs.empty()));
-}
-
-bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::run() {
-  internal_order_test();
-  unsigned int a = 0;
-  if (world.rank() == 0) {
-    a = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size();
-  }
-  broadcast(world, a, 0);
-  if (world.rank() == 0) {
-    unsigned int rows = taskData->inputs_count[0];
-    unsigned int cols = taskData->inputs_count[1];
-    input_ = std::vector<int>(rows * cols);
-    for (unsigned int i = 0; i < rows; i++) {
-      auto *tmp_ = reinterpret_cast<int *>(taskData->inputs[i]);
-      for (unsigned int j = 0; j < cols; j++) {
-        input_[i * cols + j] = tmp_[j];
-      }
-    }
-    for (int i = 1; i < world.size(); i++) {
-      world.send(i, 0, input_.data() + a * i, a);
-    }
-  }
-  loc_input_ = std::vector<int>(a);
-  if (world.rank() == 0) {
-    loc_input_ = std::vector<int>(input_.begin(), input_.begin() + a);
-  } else {
-    world.recv(0, 0, loc_input_.data(), a);
-  }
-  int loc_res = *std::max_element(loc_input_.begin(), loc_input_.end());
-  reduce(world, loc_res, res_, boost::mpi::maximum<int>(), 0);
-  return true;
-}
-
-bool sedova_o_max_of_vector_elements_mpi::TestMPITaskParallel::post_processing() {
-  internal_order_test();
-
-  if (world.rank() == 0) {
-    reinterpret_cast<int *>(taskData->outputs[0])[0] = res_;
-  }
-  return true;
-}
diff --git a/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp
deleted file mode 100644
index 44f6fb536b8..00000000000
--- a/tasks/seq/sedova_o_max_of_vector_elements/func_tests/main.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2024 Sedova Olga
-#include <gtest/gtest.h>
-
-#include <random>
-#include <vector>
-
-#include "core/task/include/task.hpp"
-#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp"
-
-namespace sedova_o_max_of_vector_elements_seq_test {
-
-std::vector<int> generate_random_vector(size_t size, size_t value) {
-  std::random_device dev;
-  std::mt19937 random(dev());
-  std::vector<int> vec(size);
-  for (size_t i = 0; i < size; i++) {
-    vec[i] = random() % (value + 1);
-  }
-  return vec;
-}
-
-std::vector<std::vector<int>> generate_random_matrix(size_t rows, size_t cols, size_t value) {
-  std::vector<std::vector<int>> matrix(rows);
-  for (size_t i = 0; i < rows; i++) {
-    matrix[i] = generate_random_vector(cols, value);
-  }
-  return matrix;
-}
-}  // namespace sedova_o_max_of_vector_elements_seq_test
-
-TEST(sedova_o_max_of_vector_elements_seq1, Test_Sum_Empty1) {
-  // Create data
-  std::vector<int> in;
-  std::vector<int> out(1, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
-  taskDataSeq->inputs_count.emplace_back(in.size());
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-  // Create Task
-  sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), false);
-}
-
-TEST(sedova_o_max_of_vector_elements_seq1, Test_Sum_Input_Incorrect) {
-  int count = 10;
-  // Create data
-  std::vector<int> in(count, 0);
-  std::vector<int> out(1, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq =
std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
-  taskDataSeq->inputs_count.emplace_back(0);  // Invalid input vector size
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-  // Create Task
-  sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), false);
-}
-
-TEST(sedova_o_max_of_vector_elements_seq1, Test_Matrix_2x2) {
-  // Create data
-  std::vector<int> in = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(2, 10);
-  std::vector<int> in2 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(2, 10);
-  std::vector<int> out(1, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in2.data()));
-  taskDataSeq->inputs_count.emplace_back(in.size());
-  taskDataSeq->inputs_count.emplace_back(in2.size());
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-  // Create Task
-  sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-  std::vector<int> matrix_input = {in[0], in[1], in2[0], in2[1]};
-  ASSERT_EQ(sedova_o_max_of_vector_elements_seq::find_max_of_matrix(matrix_input), out[0]);
-}
-
-TEST(sedova_o_max_of_vector_elements_seq1, Test_Matrix_3x3) {
-  // Create data
-  std::vector<int> in = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10);
-  std::vector<int> in2 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10);
-  std::vector<int> in3 = sedova_o_max_of_vector_elements_seq_test::generate_random_vector(3, 10);
-  std::vector<int> out(1, 0);
-  // Create TaskData
-  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in2.data()));
-  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in3.data()));
-  taskDataSeq->inputs_count.emplace_back(in.size());
-  taskDataSeq->inputs_count.emplace_back(in2.size());
-  taskDataSeq->inputs_count.emplace_back(in3.size());
-  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
-  taskDataSeq->outputs_count.emplace_back(out.size());
-  // Create Task
-  sedova_o_max_of_vector_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
-  ASSERT_EQ(testTaskSequential.validation(), true);
-  testTaskSequential.pre_processing();
-  testTaskSequential.run();
-  testTaskSequential.post_processing();
-  std::vector<int> matrix_input = {in[0], in[1], in[2], in2[0], in2[1], in2[2], in3[0], in3[1], in3[2]};
-  ASSERT_EQ(sedova_o_max_of_vector_elements_seq::find_max_of_matrix(matrix_input), out[0]);
-}
diff --git a/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp
deleted file mode 100644
index e94ff3d479e..00000000000
--- a/tasks/seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-#pragma once
-
-#include <vector>
-
-#include "core/task/include/task.hpp"
-
-namespace sedova_o_max_of_vector_elements_seq {
-int find_max_of_matrix(std::vector<int> matrix);
-
-class TestTaskSequential : public ppc::core::Task {
- public:
-  explicit
TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {}; - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - int res_{}; - std::vector input_{}; -}; - -} // namespace sedova_o_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index 5fece2a10e8..00000000000 --- a/tasks/seq/sedova_o_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2024 Sedova Olga -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "core/task/include/task.hpp" -#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp" - -namespace sedova_o_max_of_vector_elements_seq_test { - -std::vector generate_random_vector(size_t size, size_t value) { - std::random_device dev; - std::mt19937 random(dev()); - std::vector vec(size); - for (size_t i = 0; i < size; i++) { - vec[i] = random() % (value + 1); - } - return vec; -} - -std::vector> generate_random_matrix(size_t rows, size_t cols, size_t value) { - std::vector> matrix(rows); - for (size_t i = 0; i < rows; i++) { - matrix[i] = generate_random_vector(cols, value); - } - return matrix; -} -} // namespace sedova_o_max_of_vector_elements_seq_test - -TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_small_matrix) { - std::random_device dev; - std::mt19937 random(dev()); - - std::shared_ptr taskDataSeq = std::make_shared(); - size_t size = 5000; - int value = 5000; - - std::vector> in; - in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); - std::vector out(1, in[0][0]); - - size_t rows = random() % size; - size_t cols = random() % size; - in[rows][cols] = value; - - for (unsigned int i = 0; i < in.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - auto testTaskSequential = std::make_shared(taskDataSeq); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(value, out[0]); -} - -TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_large_matrix) { - std::random_device dev; - std::mt19937 random(dev()); - - std::shared_ptr taskDataSeq = std::make_shared(); - size_t size = 50000; - int value = 50000; - - std::vector> in; - in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); - std::vector out(1, in[0][0]); - - size_t rows = random() % size; - size_t cols = random() % size; - in[rows][cols] = value; - - for (unsigned int i = 0; i < in.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - 
taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - auto testTaskSequential = std::make_shared(taskDataSeq); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(value, out[0]); -} - -TEST(sedova_o_max_of_vector_elements_seq, test_pipeline_run_different_values) { - std::random_device dev; - std::mt19937 random(dev()); - - std::shared_ptr taskDataSeq = std::make_shared(); - size_t size = 15000; - int value = 15000; - - std::vector> in; - in = sedova_o_max_of_vector_elements_seq_test::generate_random_matrix(size, size, value); - std::vector out(1, in[0][0]); - - size_t rows = random() % size; - size_t cols = random() % size; - in[rows][cols] = value + 1; - - for (unsigned int i = 0; i < in.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->inputs_count.emplace_back(size); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - auto testTaskSequential = std::make_shared(taskDataSeq); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(value + 1, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp deleted file mode 100644 index c3417af265b..00000000000 --- a/tasks/seq/sedova_o_max_of_vector_elements/src/ops_seq.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 Sedova Olga -#include "seq/sedova_o_max_of_vector_elements/include/ops_seq.hpp" - -int sedova_o_max_of_vector_elements_seq::find_max_of_matrix(std::vector matrix) { - if (matrix.empty()) return 1; - int max = matrix[0]; - for (size_t i = 0; i < matrix.size(); i++) { - if (matrix[i] > max) { - max = matrix[i]; - } - } - return max; -} - -bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - unsigned int rows = taskData->inputs_count[0]; - unsigned int cols = taskData->inputs_count[1]; - input_ = std::vector(rows * cols); - for (unsigned int i = 0; i < rows; i++) { - auto* input_data = reinterpret_cast(taskData->inputs[i]); - for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { - input_[i * cols + j] = input_data[j]; - } - } - return true; -} - -bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::validation() { - 
internal_order_test(); - return taskData->inputs_count[0] >= 1 && taskData->inputs_count[1] >= 1 && taskData->outputs_count[0] == 1; -} - -bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::run() { - internal_order_test(); - res_ = sedova_o_max_of_vector_elements_seq::find_max_of_matrix(input_); - return true; -} - -bool sedova_o_max_of_vector_elements_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res_; - return true; -} \ No newline at end of file From 6b9be4dcea6d81f284a3e1c29a82648e4051afc7 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 7 Nov 2024 17:05:29 +0100 Subject: [PATCH 141/155] =?UTF-8?q?Revert=20"=D0=90=D0=BB=D0=BF=D1=83?= =?UTF-8?q?=D1=82=D0=BE=D0=B2=20=D0=98=D0=B2=D0=B0=D0=BD.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=208.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5?= =?UTF-8?q?=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1=D0=BE=D0=BB=D0=B5?= =?UTF-8?q?=D0=B5=20=D0=BE=D1=82=D0=BB=D0=B8=D1=87=D0=B0=D1=8E=D1=89=D0=B8?= =?UTF-8?q?=D1=85=D1=81=D1=8F=20=D0=BF=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87?= =?UTF-8?q?=D0=B5=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD?= =?UTF-8?q?=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE?= =?UTF-8?q?=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0."=20(#251)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#147 @Applejack2004 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11725918957/job/32663343486 --- .../func_tests/main.cpp | 347 ------------------ .../include/ops_mpi.hpp | 49 --- .../perf_tests/main.cpp | 86 ----- .../src/ops_mpi.cpp | 108 ------ .../func_tests/main.cpp | 131 ------- .../include/ops_seq.hpp | 24 -- .../perf_tests/main.cpp | 74 ---- .../src/ops_seq.cpp | 44 --- 8 files changed, 863 deletions(-) delete mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp delete mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp delete mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp delete mode 100644 tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp delete mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp delete mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index 3815e2ea5f0..00000000000 --- a/tasks/mpi/alputov_i_most_different_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,347 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" - -namespace alputov_i_most_different_neighbor_elements_mpi { -std::vector generator(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - - std::vector ans(sz); - for (int i = 0; i < sz; ++i) { - ans[i] = gen() % 1000; - int x = gen() % 2; - if (x == 0) ans[i] *= -1; - } - - return ans; -} -} // namespace alputov_i_most_different_neighbor_elements_mpi - 
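// [Editor's note] Before the tests below: a minimal reference sketch of the
// quantity they exercise -- the largest absolute difference between adjacent
// vector elements. This is an illustration added for readers of this log, not
// part of the reverted sources; the helper name max_neighbor_difference is
// hypothetical. It assumes v.size() >= 2, which is what the tasks'
// validation() enforces before any of this logic runs.
#include <algorithm>
#include <cstdlib>
#include <vector>

int max_neighbor_difference(const std::vector<int> &v) {
  // Scan each adjacent pair and keep the maximum absolute difference.
  int best = std::abs(v[1] - v[0]);
  for (std::size_t i = 2; i < v.size(); ++i) {
    best = std::max(best, std::abs(v[i] - v[i - 1]));
  }
  return best;
}
// The MPI variant reverted below follows the same idea, with each rank scanning
// one chunk (plus one element of overlap) and a reduce picking the global best.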
-TEST(alputov_i_most_different_neighbor_elements_mpi, EmptyInput_ReturnsFalse) { - boost::mpi::communicator world; - std::vector global_vec(1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - // Create data - std::vector reference_ans(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_ans.data())); - taskDataSeq->outputs_count.emplace_back(reference_ans.size()); - - // Create Task - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), false); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, InputSizeTwo_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_diff(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int sz = 2; - global_vec = std::vector(sz, 0); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_diff.data())); - taskDataPar->outputs_count.emplace_back(global_diff.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_diff(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_diff.data())); - taskDataSeq->outputs_count.emplace_back(reference_diff.size()); - - // Create Task - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_diff[0], global_diff[0]); - } -} -TEST(alputov_i_most_different_neighbor_elements_mpi, LargeRandomInput_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int sz = 1234; - global_vec = alputov_i_most_different_neighbor_elements_mpi::generator(sz); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - 
testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, MediumRandomInput_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int sz = 120; - global_vec = alputov_i_most_different_neighbor_elements_mpi::generator(sz); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector reference_max(1); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - // Create Task - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, AllEqualElements_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int sz = 100; - global_vec = std::vector(sz, 0); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - 
testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_max(1); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, AlternatingElements_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_vec = {1, -1, 1, -1, 1, -1, 1, -1, 1, -1}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_max(1); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, ConstantDifferenceSequence_CorrectResult) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - const int sz = 123; - global_vec.resize(sz); - for (int i = 0; i < sz; ++i) { - global_vec[i] = sz - i; - } - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - 
testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_max(1); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, MostlyZerosInput_ReturnsCorrectPair) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector global_max(1); - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - global_vec = {12, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - std::vector reference_max(1); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataSeq->inputs_count.emplace_back(global_vec.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - - alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq - most_different_neighbor_elements_seq(taskDataSeq); - ASSERT_EQ(most_different_neighbor_elements_seq.validation(), true); - most_different_neighbor_elements_seq.pre_processing(); - most_different_neighbor_elements_seq.run(); - most_different_neighbor_elements_seq.post_processing(); - - ASSERT_EQ(reference_max[0], global_max[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp deleted file mode 100644 index 7a939f660f0..00000000000 --- a/tasks/mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace alputov_i_most_different_neighbor_elements_mpi { - -class most_different_neighbor_elements_seq : public ppc::core::Task { - public: - explicit most_different_neighbor_elements_seq(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::pair res{}; -}; - -class most_different_neighbor_elements_mpi : public 
ppc::core::Task { - public: - explicit most_different_neighbor_elements_mpi(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - std::pair res; - size_t size; - size_t st; - boost::mpi::communicator world; -}; - -} // namespace alputov_i_most_different_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index bd9ebae8591..00000000000 --- a/tasks/mpi/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,86 +0,0 @@ -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" - -TEST(alputov_i_most_different_neighbor_elements_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec(20000000, 0); - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = - std::make_shared( - taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(2, global_sum[0]); - } -} - -TEST(alputov_i_most_different_neighbor_elements_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector global_vec(20000000, 0); - std::vector global_sum(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); - taskDataPar->outputs_count.emplace_back(global_sum.size()); - } - - auto testMpiTaskParallel = - std::make_shared( - taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - 
ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(0, global_sum[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp deleted file mode 100644 index 16552d1a870..00000000000 --- a/tasks/mpi/alputov_i_most_different_neighbor_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include "mpi/alputov_i_most_different_neighbor_elements/include/ops_mpi.hpp" - -#include -#include -#include -#include - -#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::pre_processing() { - internal_order_test(); - - auto input = std::vector(taskData->inputs_count[0]); - auto* tmp = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp, tmp + taskData->inputs_count[0], input.begin()); - - input_ = std::vector>(input.size() - 1); - - for (size_t i = 1; i < input.size(); ++i) { - input_[i - 1] = {std::abs(input[i] - input[i - 1]), std::min(input[i], input[i - 1])}; - } - - res = input_[0]; - - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::run() { - internal_order_test(); - - for (size_t i = 1; i < input_.size(); ++i) { - if (res.first < input_[i].first) res = input_[i]; - } - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_seq::post_processing() { - internal_order_test(); - - reinterpret_cast(taskData->outputs[0])[0] = res.first; - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::pre_processing() { - internal_order_test(); - - res = {INT_MIN, -1}; - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::validation() { - internal_order_test(); - if (world.rank() == 0) { - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; - } - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::run() { - internal_order_test(); - - int delta_size = 0; - if (world.rank() == 0) { - delta_size = (taskData->inputs_count[0]) / world.size(); - size = taskData->inputs_count[0]; - if (taskData->inputs_count[0] % world.size() > 0) delta_size++; - } - broadcast(world, delta_size, 0); - - if (world.rank() == 0) { - input_ = std::vector(world.size() * delta_size + 2, 0); - auto* tmp = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp, tmp + taskData->inputs_count[0], input_.begin()); - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * delta_size, delta_size + 1); - } - } - - local_input_ = std::vector(delta_size + 1); - st = world.rank() * delta_size; - if (world.rank() == 0) { - local_input_ = std::vector(input_.begin(), input_.begin() + delta_size + 1); - } else { - world.recv(0, 0, local_input_.data(), delta_size + 1); - } - - std::pair local_ans = {INT_MIN, -1}; - for (size_t i = 0; (i + st) < size - 1 && i < (local_input_.size() - 1); ++i) { - std::pair tmp = {abs(local_input_[i + 1] - local_input_[i]), i + st}; - local_ans = std::max(local_ans, tmp); - } - reduce(world, local_ans, res, 
boost::mpi::maximum>(), 0); - return true; -} - -bool alputov_i_most_different_neighbor_elements_mpi::most_different_neighbor_elements_mpi::post_processing() { - internal_order_test(); - - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res.first; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp deleted file mode 100644 index d46f5759699..00000000000 --- a/tasks/seq/alputov_i_most_different_neighbor_elements/func_tests/main.cpp +++ /dev/null @@ -1,131 +0,0 @@ -#include - -#include -#include - -#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" - -namespace alputov_i_most_different_neighbor_elements_seq { -std::vector generator(int sz) { - std::random_device dev; - std::mt19937 gen(dev()); - - std::vector ans(sz); - for (int i = 0; i < sz; ++i) { - ans[i] = gen() % 1000; - int x = gen() % 2; - if (x == 0) ans[i] *= -1; - } - - return ans; -} -} // namespace alputov_i_most_different_neighbor_elements_seq - -TEST(alputov_i_most_different_neighbor_elements_seq, EmptyInput_ReturnsFalse) { - std::vector in = {}; - std::vector> out(1); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, InputSizeTwo_ReturnsCorrectPair) { - std::vector in = alputov_i_most_different_neighbor_elements_seq::generator(2); - std::vector> out(1); - std::pair ans = {std::min(in[0], in[1]), std::max(in[0], in[1])}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, SequentialInput_ReturnsFirstTwoElements) { - std::vector in = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - std::vector> out(1); - std::pair ans = {1, 2}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, MostlyZerosInput_ReturnsZeroAndLargest) { - std::vector in = {0, 0, 0, 0, 0, 0, 0, 0, 0, 12}; - 
std::vector> out(1); - std::pair ans = {0, 12}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, AllZerosInput_ReturnsZeroZero) { - std::vector in(100, 0); - std::vector> out(1); - std::pair ans = {0, 0}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, CloseNegativeNumbers_ReturnsCorrectPair) { - std::vector in = {-1, -2, -3, -4, -1000}; - std::vector> out(1); - std::pair ans = {-1000, -4}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(ans, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp b/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp deleted file mode 100644 index 57e9b7d14c3..00000000000 --- a/tasks/seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include - -#include "core/task/include/task.hpp" - -namespace alputov_i_most_different_neighbor_elements_seq { - -class most_different_neighbor_elements_seq : public ppc::core::Task { - public: - explicit most_different_neighbor_elements_seq(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - std::pair res{}; -}; - -} // namespace alputov_i_most_different_neighbor_elements_seq \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp deleted file mode 100644 index c2ade47b3ed..00000000000 --- a/tasks/seq/alputov_i_most_different_neighbor_elements/perf_tests/main.cpp +++ /dev/null @@ -1,74 +0,0 @@ -#include - -#include - -#include 
"core/perf/include/perf.hpp" -#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" - -TEST(alputov_i_most_different_neighbor_elements_seq, test_pipeline_run) { - std::vector in(20000000, 0); - std::vector> out(1); - - std::pair ans = {0, 0}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - auto testTaskSequential = - std::make_shared( - taskDataSeq); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(ans, out[0]); -} - -TEST(alputov_i_most_different_neighbor_elements_seq, test_task_run) { - std::vector in(20000000, 0); - std::vector> out(1); - - std::pair ans = {0, 0}; - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - auto testTaskSequential = - std::make_shared( - taskDataSeq); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(ans, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp b/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp deleted file mode 100644 index e6d53206c87..00000000000 --- a/tasks/seq/alputov_i_most_different_neighbor_elements/src/ops_seq.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include "seq/alputov_i_most_different_neighbor_elements/include/ops_seq.hpp" - -#include -#include - -bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::pre_processing() { - internal_order_test(); - - auto input = std::vector(taskData->inputs_count[0]); - auto* tmp = reinterpret_cast(taskData->inputs[0]); - std::copy(tmp, tmp + taskData->inputs_count[0], input.begin()); - - input_ = std::vector>(input.size() - 1); - - for (size_t i = 1; i < input.size(); ++i) { - input_[i - 1] = {std::abs(input[i] - input[i - 1]), std::min(input[i], input[i - 1])}; - } - - res = input_[0]; - - return true; -} - -bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 1 && taskData->outputs_count[0] == 1; -} - -bool 
alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::run() { - internal_order_test(); - - for (size_t i = 1; i < input_.size(); ++i) { - if (res.first < input_[i].first) res = input_[i]; - } - - return true; -} - -bool alputov_i_most_different_neighbor_elements_seq::most_different_neighbor_elements_seq::post_processing() { - internal_order_test(); - - reinterpret_cast*>(taskData->outputs[0])[0] = {res.second, res.second + res.first}; - return true; -} From 42b99693f7771e167caa2b8535df83667d36bf3c Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 7 Nov 2024 17:23:39 +0100 Subject: [PATCH 142/155] =?UTF-8?q?Revert=20"=D0=A7=D0=B0=D1=81=D1=82?= =?UTF-8?q?=D0=BE=D0=B2=20=D0=92=D1=8F=D1=87=D0=B5=D1=81=D0=BB=D0=B0=D0=B2?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2024.=20=D0=9F=D0=BE=D0=B4=D1=81?= =?UTF-8?q?=D1=87=D1=91=D1=82=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=81?= =?UTF-8?q?=D0=BB=D0=BE=D0=B2=20=D0=B2=20=D1=81=D1=82=D1=80=D0=BE=D0=BA?= =?UTF-8?q?=D0=B5."=20(#252)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#160 @ChastovSlava https://github.com/learning-process/ppc-2024-autumn/actions/runs/11726801606/job/32666326391 --- .../func_tests/main.cpp | 330 ------------------ .../include/ops_mpi.hpp | 47 --- .../perf_tests/main.cpp | 84 ----- .../src/ops_mpi.cpp | 86 ----- .../func_tests/main.cpp | 185 ---------- .../include/ops_seq.hpp | 26 -- .../perf_tests/main.cpp | 93 ----- .../src/ops_seq.cpp | 46 --- 8 files changed, 897 deletions(-) delete mode 100644 tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp delete mode 100644 tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp delete mode 100644 tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp delete mode 100644 tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp delete mode 100644 tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp delete mode 100644 tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp delete mode 100644 tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp delete mode 100644 tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp diff --git a/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp b/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp deleted file mode 100644 index d96d0eb7ebc..00000000000 --- a/tasks/mpi/chastov_v_count_words_in_line/func_tests/main.cpp +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2024 Chastov Vyacheslav -#include - -#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" - -std::vector createTestInput(int n) { - std::vector wordCountInput; - std::string testString = "This is a proposal to evaluate the performance of a word counting algorithm via MPI. 
"; - for (int i = 0; i < n; i++) { - for (unsigned long int j = 0; j < testString.length(); j++) { - wordCountInput.push_back(testString[j]); - } - } - return wordCountInput; -} - -// Test to check the behavior of the MPI word counting function with an empty string -TEST(chastov_v_count_words_in_line_mpi, empty_string) { - boost::mpi::communicator world; - std::vector input = {}; - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_FALSE(testTaskParallel.validation()); - } -} - -// Test to verify the MPI word counting function with a single word input ("hello") -TEST(chastov_v_count_words_in_line_mpi, words_1) { - boost::mpi::communicator world; - std::vector input; - std::string testString = "hello"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} - -// Test to verify the MPI word counting function with an input string containing four words ("My name is Slava") -TEST(chastov_v_count_words_in_line_mpi, words_4) { - boost::mpi::communicator world; - std::vector input; - std::string testString = "My name is Slava"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - 
chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} - -// Test to verify the MPI word counting function with an input string that generates 300 words -TEST(chastov_v_count_words_in_line_mpi, words_300) { - boost::mpi::communicator world; - std::vector input = createTestInput(20); - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} - -// Test to verify the MPI word counting function with an input string that generates 1500 words -TEST(chastov_v_count_words_in_line_mpi, words_1500) { - boost::mpi::communicator world; - std::vector input = createTestInput(100); - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); -
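  // Every task in this suite is driven through the same four-phase contract:
  // validation(), then pre_processing(), run(), and post_processing().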
testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} - -// Test to verify the MPI word counting function with an input string that generates 7500 words -TEST(chastov_v_count_words_in_line_mpi, words_7500) { - boost::mpi::communicator world; - std::vector input = createTestInput(500); - std::vector wordsFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} - -// The test tests the functionality of counting words in a string with many spaces between words -TEST(chastov_v_count_words_in_line_mpi, multiple_spaces) { - boost::mpi::communicator world; - std::vector input = {'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't'}; - std::vector wordsFound(1, 0); - auto taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - auto testTaskParallel = std::make_shared(taskDataPar); - ASSERT_TRUE(testTaskParallel->validation()); - testTaskParallel->pre_processing(); - testTaskParallel->run(); - testTaskParallel->post_processing(); - - if (world.rank() == 0) { - ASSERT_EQ(wordsFound[0], 4); - } 
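  // "This is a test" contains three spaces; the parallel task reports
  // (number of spaces) + 1 words, hence the expected count of 4.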
-} - -// Test checks the word count in a string with multiple spaces between words -TEST(chastov_v_count_words_in_line_mpi, multiple_consecutive_spaces) { - boost::mpi::communicator world; - std::vector input; - std::string testString = "Hello world MPI"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataPar->inputs_count.emplace_back(input.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskDataPar->outputs_count.emplace_back(wordsFound.size()); - } - - chastov_v_count_words_in_line_mpi::TestMPITaskParallel testTaskParallel(taskDataPar); - ASSERT_TRUE(testTaskParallel.validation()); - testTaskParallel.pre_processing(); - testTaskParallel.run(); - testTaskParallel.post_processing(); - - if (world.rank() == 0) { - // Create data - std::vector referenceWordFound(1, 0); - - // Create TaskData - std::shared_ptr taskDataSequential = std::make_shared(); - taskDataSequential->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskDataSequential->inputs_count.emplace_back(input.size()); - taskDataSequential->outputs.emplace_back(reinterpret_cast(referenceWordFound.data())); - taskDataSequential->outputs_count.emplace_back(referenceWordFound.size()); - - // Create Task - chastov_v_count_words_in_line_mpi::TestMPITaskSequential testTaskSequential(taskDataSequential); - ASSERT_TRUE(testTaskSequential.validation()); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - - ASSERT_EQ(wordsFound[0], referenceWordFound[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp b/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp deleted file mode 100644 index c075b62d385..00000000000 --- a/tasks/mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2024 Chastov Vyacheslav -#pragma once - -#include - -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace chastov_v_count_words_in_line_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int wordsFound{}; - int spacesFound{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int localSpaceFound{}; - int wordsFound{}; - int spacesFound{}; - boost::mpi::communicator world; -}; - -} // namespace chastov_v_count_words_in_line_mpi \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp b/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp deleted file mode 100644 index b0912694e98..00000000000 --- a/tasks/mpi/chastov_v_count_words_in_line/perf_tests/main.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2024 Chastov Vyacheslav 
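// For reference: the fixture sentence used below contains 15 words, so
// createTestInput(n) produces a string of 15 * n words; both perf tests in
// this file therefore assert 15 * 2000 = 30000.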
-#include - -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" - -std::vector createTestInput(int n) { - std::vector wordCountInput; - std::string testString = "This is a proposal to evaluate the performance of a word counting algorithm via MPI. "; - for (int i = 0; i < n; i++) { - for (unsigned long int j = 0; j < testString.length(); j++) { - wordCountInput.push_back(testString[j]); - } - } - return wordCountInput; -} - -std::vector wordCountInput = createTestInput(2000); - -TEST(chastov_v_count_words_in_line_mpi, test_pipeline_run) { - boost::mpi::communicator world; - std::vector input = wordCountInput; - std::vector wordsFound(1, 0); - // Create TaskData - std::shared_ptr taskData = std::make_shared(); - - if (world.rank() == 0) { - taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskData->outputs_count.emplace_back(wordsFound.size()); - } - - auto testMpiTaskParallel = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(wordsFound[0], 30000); - } -} - -TEST(chastov_v_count_words_in_line_mpi, test_task_run) { - boost::mpi::communicator world; - std::vector input = wordCountInput; - std::vector wordsFound(1, 0); - - std::shared_ptr taskData = std::make_shared(); - - if (world.rank() == 0) { - taskData->inputs.emplace_back(reinterpret_cast(const_cast(input.data()))); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(wordsFound.data())); - taskData->outputs_count.emplace_back(wordsFound.size()); - } - - auto testTask = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->task_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(wordsFound[0], 30000); - } -} \ No newline at end of file diff --git a/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp b/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp deleted file mode 100644 index 69778a4f9b3..00000000000 --- a/tasks/mpi/chastov_v_count_words_in_line/src/ops_mpi.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2024 Chastov Vyacheslav -#include "mpi/chastov_v_count_words_in_line/include/ops_mpi.hpp" - -bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - input_ = std::vector(taskData->inputs_count[0]); - auto* temp = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = temp[i]; - } - return true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; -} - -bool 
chastov_v_count_words_in_line_mpi::TestMPITaskSequential::run() { - internal_order_test(); - for (char c : input_) { - if (c == ' ') { - spacesFound++; - } - } - wordsFound = spacesFound + 1; - return true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = wordsFound; - return true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - return true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - return (world.rank() == 0) ? (taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1) : true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::run() { - internal_order_test(); - unsigned int blockSize = 0; - if (world.rank() == 0) { - input_ = std ::vector(taskData->inputs_count[0]); - auto* tmp = reinterpret_cast(taskData->inputs[0]); - for (unsigned long int i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp[i]; - } - blockSize = taskData->inputs_count[0] / world.size(); - } - boost::mpi::broadcast(world, blockSize, 0); - - local_input_.resize(blockSize); - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, input_.data() + proc * blockSize, blockSize); - } - local_input_ = std::vector(input_.begin(), input_.begin() + blockSize); - } else { - world.recv(0, 0, local_input_.data(), blockSize); - } - for (char c : local_input_) { - if (c == ' ') { - localSpaceFound++; - } - } - boost::mpi::reduce(world, localSpaceFound, spacesFound, std::plus<>(), 0); - if (world.rank() == 0) { - wordsFound = spacesFound + 1; - } - return true; -} - -bool chastov_v_count_words_in_line_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = wordsFound; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp b/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp deleted file mode 100644 index 2a667d04383..00000000000 --- a/tasks/seq/chastov_v_count_words_in_line/func_tests/main.cpp +++ /dev/null @@ -1,185 +0,0 @@ -#include - -#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" - -std::vector createTestInput(int n) { - std::vector wordCountInput; - std::string firstSentence = "Hello my name is Slava. Now I am a third year student at Lobachevsky University. 
"; - for (int i = 0; i < n - 1; i++) { - for (unsigned long int j = 0; j < firstSentence.length(); j++) { - wordCountInput.push_back(firstSentence[j]); - } - } - std::string lastSentence = "This is a proposal to evaluate the performance of a word counting algorithm via MPI."; - for (unsigned long int j = 0; j < lastSentence.length(); j++) { - wordCountInput.push_back(lastSentence[j]); - } - return wordCountInput; -} - -// Test case to check the behavior of the word counting function when given an empty string -TEST(chastov_v_count_words_in_line_seq, empty_string) { - std::vector input = {}; - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), false); -} - -// Test case to verify that the function correctly identifies that a string consisting only of spaces -TEST(chastov_v_count_words_in_line_seq, handles_only_spaces) { - std::vector inputData = {' ', ' ', ' '}; - std::vector outputData(1, 0); - - auto taskDataPtr = std::make_shared(); - taskDataPtr->inputs.emplace_back(reinterpret_cast(inputData.data())); - taskDataPtr->inputs_count.emplace_back(inputData.size()); - taskDataPtr->outputs.emplace_back(reinterpret_cast(outputData.data())); - taskDataPtr->outputs_count.emplace_back(outputData.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential wordCountTask(taskDataPtr); - ASSERT_TRUE(wordCountTask.validation()); - wordCountTask.pre_processing(); - wordCountTask.run(); - wordCountTask.post_processing(); - - ASSERT_EQ(outputData[0], 0); -} - -// Test case to check the counting functionality for a single word input -TEST(chastov_v_count_words_in_line_seq, word_1) { - std::vector input; - std::string testString = "hello"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 1); -} - -// Test case for counting the number of words in a four word sentence -TEST(chastov_v_count_words_in_line_seq, words_4) { - std::vector input; - std::string testString = "My name is Slava"; - for (unsigned long int j = 0; j < testString.length(); j++) { - input.push_back(testString[j]); - } - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 4); -} - -// Test case to verify the function's ability to 
handle larger input sizes -// The generated string should contain enough words to yield a count of 450 -TEST(chastov_v_count_words_in_line_seq, words_450) { - std::vector input = createTestInput(30); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 450); -} - -// Test case to check the performance and correctness for an even larger input size -// The created string should contain enough words to yield a count of 1500 -TEST(chastov_v_count_words_in_line_seq, words_1500) { - std::vector input = createTestInput(100); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 1500); -} - -// Test case to evaluate the handling of a very large number of words -// The generated string should be such that the word count is expected to be 7500 -TEST(chastov_v_count_words_in_line_seq, words_7500) { - std::vector input = createTestInput(500); - std::vector out(1, 0); - - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(out.data())); - taskData->outputs_count.emplace_back(out.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential testTask(taskData); - ASSERT_EQ(testTask.validation(), true); - testTask.pre_processing(); - testTask.run(); - testTask.post_processing(); - - ASSERT_EQ(out[0], 7500); -} - -// Test case to check the counting of words that include special characters -// The input contains two words separated by a space, and the expected output is 2 -TEST(chastov_v_count_words_in_line_seq, words_with_special_characters) { - std::vector inputData = {'W', 'o', 'r', 'd', '@', '1', ' ', 'W', 'o', 'r', 'd', '#', '2'}; - std::vector outputData(1, 0); - - auto taskDataPtr = std::make_shared(); - taskDataPtr->inputs.emplace_back(reinterpret_cast(inputData.data())); - taskDataPtr->inputs_count.emplace_back(inputData.size()); - taskDataPtr->outputs.emplace_back(reinterpret_cast(outputData.data())); - taskDataPtr->outputs_count.emplace_back(outputData.size()); - - chastov_v_count_words_in_line_seq::TestTaskSequential wordCountTask(taskDataPtr); - ASSERT_TRUE(wordCountTask.validation()); - wordCountTask.pre_processing(); - wordCountTask.run(); - wordCountTask.post_processing(); - - ASSERT_EQ(outputData[0], 2); -} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp b/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp deleted file mode 100644 index 7e9158c2ddd..00000000000 --- a/tasks/seq/chastov_v_count_words_in_line/include/ops_seq.hpp +++ 
/dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace chastov_v_count_words_in_line_seq { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector inputString; - int wordsFound{}; - int spacesFound{}; -}; - -} // namespace chastov_v_count_words_in_line_seq \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp b/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp deleted file mode 100644 index 4bfad5ccb47..00000000000 --- a/tasks/seq/chastov_v_count_words_in_line/perf_tests/main.cpp +++ /dev/null @@ -1,93 +0,0 @@ -#include - -#include "core/perf/include/perf.hpp" -#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" - -std::vector createTestInput(int n) { - std::vector wordCountInput; - std::string firstSentence = "Hello my name is Slava. Now I am a third year student at Lobachevsky University. "; - for (int i = 0; i < n - 1; i++) { - for (unsigned long int j = 0; j < firstSentence.length(); j++) { - wordCountInput.push_back(firstSentence[j]); - } - } - std::string lastSentence = "This is a proposal to evaluate the performance of a word counting algorithm via MPI."; - for (unsigned long int j = 0; j < lastSentence.length(); j++) { - wordCountInput.push_back(lastSentence[j]); - } - return wordCountInput; -} - -std::vector wordCountInput = createTestInput(1000); - -TEST(word_count_seq, test_pipeline_run) { - // Create data - std::vector input = wordCountInput; - std::vector word_count(1, 0); - - // Create TaskData - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); - taskData->outputs_count.emplace_back(word_count.size()); - - // Create Task - auto testTask = std::make_shared(taskData); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(word_count[0], 15000); -} - -TEST(word_count_seq, test_task_run) { - // Create data - std::vector input = wordCountInput; - std::vector word_count(1, 0); - - // Create TaskData - std::shared_ptr taskData = std::make_shared(); - taskData->inputs.emplace_back(reinterpret_cast(input.data())); - taskData->inputs_count.emplace_back(input.size()); - taskData->outputs.emplace_back(reinterpret_cast(word_count.data())); - taskData->outputs_count.emplace_back(word_count.size()); - - // Create Task - auto testTask = std::make_shared(taskData); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 1000; - const auto t0 = 
std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTask); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - ASSERT_EQ(word_count[0], 15000); -} \ No newline at end of file diff --git a/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp b/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp deleted file mode 100644 index 9193351eed5..00000000000 --- a/tasks/seq/chastov_v_count_words_in_line/src/ops_seq.cpp +++ /dev/null @@ -1,46 +0,0 @@ -#include "seq/chastov_v_count_words_in_line/include/ops_seq.hpp" - -bool chastov_v_count_words_in_line_seq::TestTaskSequential::pre_processing() { - internal_order_test(); - inputString = std::vector(taskData->inputs_count[0]); - auto* tmp = reinterpret_cast(taskData->inputs[0]); - for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { - inputString[i] = tmp[i]; - } - return true; -} - -bool chastov_v_count_words_in_line_seq::TestTaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; -} - -bool chastov_v_count_words_in_line_seq::TestTaskSequential::run() { - internal_order_test(); - spacesFound = 0; - wordsFound = 0; - - bool inWord = false; - - for (char c : inputString) { - if (std::isspace(c) != 0) { - if (inWord) { - inWord = false; - spacesFound++; - } - } else { - if (!inWord) { - inWord = true; - wordsFound++; - } - } - } - - return true; -} - -bool chastov_v_count_words_in_line_seq::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = wordsFound; - return true; -} \ No newline at end of file From 036635312dd750c5472afaf3c8f38f49eebaa472 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 7 Nov 2024 17:36:46 +0100 Subject: [PATCH 143/155] =?UTF-8?q?Revert=20"=D0=9A=D0=BE=D0=B2=D0=B0?= =?UTF-8?q?=D0=BB=D1=8C=D1=87=D1=83=D0=BA=20=D0=90=D0=BB=D0=B5=D0=BA=D1=81?= =?UTF-8?q?=D0=B0=D0=BD=D0=B4=D1=80.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87?= =?UTF-8?q?=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2013.?= =?UTF-8?q?=20=20=D0=9C=D0=B0=D0=BA=D1=81=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C?= =?UTF-8?q?=D0=BD=D0=BE=D0=B5=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D0=B5=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=BC=D0=B0=D1=82=D1=80=D0=B8=D1=86=D1=8B."=20(#253)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#181 @KADCHDR https://github.com/learning-process/ppc-2024-autumn/actions/runs/11727112253/job/32667390704 --- .../func_tests/main.cpp | 379 ------------------ .../include/ops_mpi.hpp | 47 --- .../perf_tests/main.cpp | 118 ------ .../src/ops_mpi.cpp | 134 ------- .../func_tests/main.cpp | 262 ------------ .../include/ops_seq.hpp | 31 -- .../perf_tests/main.cpp | 118 ------ .../src/ops_seq.cpp | 55 --- 8 files changed, 1144 deletions(-) delete mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp delete mode 100644 
tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp delete mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp deleted file mode 100644 index 3390a8aa72e..00000000000 --- a/tasks/mpi/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include -#include - -#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp" - -using namespace kovalchuk_a_max_of_vector_elements; - -std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); -std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); - -std::vector getRandomVector(int sz, int min, int max) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = min + gen() % (max - min + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int columns, int min, int max) { - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(columns, min, max); - } - return vec; -} - -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_10_10) { - const int count_rows = 10; - const int count_columns = 10; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} 
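Each functional test in this file builds a random matrix on rank 0, runs the parallel task, and checks the result against a sequential reference. Underneath, the parallel implementation flattens the matrix, hands each process a segment, and combines the per-process maxima with a reduce. A self-contained sketch of that scheme, assuming only Boost.MPI (example data and names are illustrative, not taken from the reverted sources):

// Standalone sketch: distributed maximum over a flattened matrix with Boost.MPI.
#include <boost/mpi.hpp>

#include <algorithm>
#include <climits>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> flat;  // the flattened matrix, filled on rank 0 only
  int chunk = 0;
  if (world.rank() == 0) {
    flat = {3, -7, 42, 0, 15, -1, 8, 9};  // example data
    chunk = static_cast<int>(flat.size()) / world.size();
    if (flat.size() % world.size() != 0) chunk++;
    // Pad with INT_MIN (neutral for max) so every rank gets a full chunk.
    flat.resize(static_cast<std::size_t>(chunk) * world.size(), INT_MIN);
  }
  boost::mpi::broadcast(world, chunk, 0);

  std::vector<int> local(chunk, INT_MIN);
  boost::mpi::scatter(world, flat.data(), local.data(), chunk, 0);

  int local_max = *std::max_element(local.begin(), local.end());
  int global_max = INT_MIN;
  boost::mpi::reduce(world, local_max, global_max, boost::mpi::maximum<int>(), 0);
  // On rank 0, global_max now holds the maximum element (42 for the data above).
  return 0;
}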
- -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_50x20) { - const int count_rows = 50; - const int count_columns = 20; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_100_100) { - const int count_rows = 100; - const int count_columns = 100; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // 
Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_1_100) { - const int count_rows = 1; - const int count_columns = 100; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} - -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Empty_Matrix) { - const int count_rows = 0; - const int count_columns = 0; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_4_4) { - const int count_rows = 4; - const int count_columns = 4; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - global_matrix = getRandomMatrix(count_rows, count_columns); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Negative_Values) { - const int count_rows = 10; - const int count_columns = 10; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns, -100, -1); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - 
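  // INT_MIN is the identity element of the max reduction, so a matrix drawn
  // entirely from [-100, -1] still yields its true maximum, e.g.
  // max{-7, -3, -9} == -3 even though the accumulator starts at INT_MIN.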
testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} -TEST(kovalchuk_a_max_of_vector_elements, Test_Max_Same_Values) { - const int count_rows = 10; - const int count_columns = 10; - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::mt19937 gen(42); - global_matrix = getRandomMatrix(count_rows, count_columns, 20, 20); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - kovalchuk_a_max_of_vector_elements::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - if (world.rank() == 0) { - // Create data - std::vector reference_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); - taskDataSeq->outputs_count.emplace_back(reference_max.size()); - // Create Task - kovalchuk_a_max_of_vector_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); - ASSERT_EQ(testMpiTaskSequential.validation(), true); - testMpiTaskSequential.pre_processing(); - testMpiTaskSequential.run(); - testMpiTaskSequential.post_processing(); - ASSERT_EQ(reference_max[0], global_max[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp deleted file mode 100644 index a4c6af014d4..00000000000 --- a/tasks/mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace kovalchuk_a_max_of_vector_elements { - -const int MINIMALGEN = -999; -const int MAXIMUMGEN 
= 999; - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int res_{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res_{}; - boost::mpi::communicator world; -}; - -} // namespace kovalchuk_a_max_of_vector_elements \ No newline at end of file diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index 8b5f7f7e7d7..00000000000 --- a/tasks/mpi/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp" - -using namespace kovalchuk_a_max_of_vector_elements; - -std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); -std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); - -std::vector getRandomVector(int sz, int min, int max) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = min + gen() % (max - min + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int columns, int min, int max) { - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(columns, min, max); - } - return vec; -} - -TEST(kovalchuk_a_max_of_vector_elements, test_pipeline_run) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - int ref = INT_MAX; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 gen(dev()); - int count_rows = 9999; - int count_columns = 9999; - global_matrix = getRandomMatrix(count_rows, count_columns); - size_t index = gen() % (static_cast(count_rows) * count_columns); - global_matrix[index / count_columns][index % count_columns] = ref; - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - // Create and init perf results - auto perfResults = std::make_shared(); - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - 
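  // perfAttr->current_timer (set above) is sampled by the perf harness to time
  // the num_running repetitions; boost::mpi::timer::elapsed() returns wall-clock
  // seconds since the timer's construction, so the printed statistics are in seconds.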
perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ref, global_max[0]); - } -} - -TEST(kovalchuk_a_max_of_vector_elements, test_task_run) { - boost::mpi::communicator world; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - int ref = INT_MAX; - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - std::random_device dev; - std::mt19937 gen(dev()); - int count_rows = 3; - int count_columns = 3; - global_matrix = getRandomMatrix(count_rows, count_columns); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = ref; - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataPar->inputs_count.emplace_back(count_rows); - taskDataPar->inputs_count.emplace_back(count_columns); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataPar->outputs_count.emplace_back(global_max.size()); - } - auto testMpiTaskParallel = std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - // Create and init perf results - auto perfResults = std::make_shared(); - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->task_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ref, global_max[0]); - } -} diff --git a/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp deleted file mode 100644 index d2e99c35caa..00000000000 --- a/tasks/mpi/kovalchuk_a_max_of_vector_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "mpi/kovalchuk_a_max_of_vector_elements/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::pre_processing() { - internal_order_test(); - // Init vectors - if (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) { - input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); - std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], input_[i].begin()); - } - } else { - input_ = std::vector>(); - } - // Init value for output - res_ = INT_MIN; - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::validation() { - internal_order_test(); - // Check count elements of output - return taskData->outputs_count[0] == 1; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::run() { - internal_order_test(); - if (!input_.empty()) { - std::vector local_res(input_.size()); - for (unsigned int i = 0; i < input_.size(); i++) { - if (!input_[i].empty()) { - local_res[i] = *std::max_element(input_[i].begin(), input_[i].end()); - } else { - local_res[i] = INT_MIN; - } - } - res_ = *std::max_element(local_res.begin(),
local_res.end()); - } else { - res_ = INT_MIN; - } - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res_; - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::pre_processing() { - internal_order_test(); - unsigned int delta = 0; - if (world.rank() == 0) { - if (taskData->inputs_count[0] == 0 || taskData->inputs_count[1] == 0) { - delta = 0; - } else { - delta = std::max(1u, taskData->inputs_count[0] * taskData->inputs_count[1] / world.size()); - } - if (taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1) { - delta = 1; - } - } - - broadcast(world, delta, 0); - - if (world.rank() == 0) { - // Init vectors - unsigned int rows = taskData->inputs_count[0]; - unsigned int columns = taskData->inputs_count[1]; - if (rows > 0 && columns > 0) { - input_ = std::vector(rows * columns); - for (unsigned int i = 0; i < rows; i++) { - auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); - for (unsigned int j = 0; j < columns; j++) { - input_[i * columns + j] = tmp_ptr[j]; - } - } - if (delta > 0) { - for (int proc = 1; proc < world.size(); proc++) { - std::span buffer(input_.data() + delta * proc, delta); - world.send(proc, 0, buffer.data(), buffer.size()); - } - } - } else { - for (int proc = 1; proc < world.size(); proc++) { - world.send(proc, 0, nullptr, 0); - } - } - } - - local_input_ = std::vector(delta); - if (world.rank() == 0) { - if (!input_.empty()) { - local_input_ = std::vector(input_.begin(), input_.begin() + delta); - } - } else { - world.recv(0, 0, local_input_.data(), delta); - } - - // Init value for output - res_ = INT_MIN; - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - // Check count elements of output - return taskData->outputs_count[0] == 1; - } - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::run() { - internal_order_test(); - int local_res = local_input_.empty() ? 
INT_MIN : *std::max_element(local_input_.begin(), local_input_.end()); - reduce(world, local_res, res_, boost::mpi::maximum(), 0); - return true; -} - -bool kovalchuk_a_max_of_vector_elements::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - reinterpret_cast(taskData->outputs[0])[0] = res_; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp deleted file mode 100644 index 43551c6112b..00000000000 --- a/tasks/seq/kovalchuk_a_max_of_vector_elements/func_tests/main.cpp +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include - -#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" - -using namespace kovalchuk_a_max_of_vector_elements_seq; - -std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); -std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); - -std::vector getRandomVector(int sz, int min, int max) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = min + gen() % (max - min + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int columns, int min, int max) { - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(columns, min, max); - } - return vec; -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_10_10) { - const int count_rows = 10; - const int count_columns = 10; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_50_20) { - const int count_rows = 50; - const int count_columns = 20; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - 
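- // The single-element output buffer registered here receives the computed
- // maximum in post_processing().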
taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_100_100) { - const int count_rows = 100; - const int count_columns = 100; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_1_100) { - const int count_rows = 1; - const int count_columns = 100; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Empty_Matrix) { - const int count_rows = 10; - const int count_columns = 10; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - 
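- // Despite its name, this test fills a 10x10 random matrix like the others;
- // no genuinely empty input is exercised.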
taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_4_4) { - const int count_rows = 4; - const int count_columns = 4; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Negative_Values) { - const int count_rows = 1; - const int count_columns = 100; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns, -1, -999); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, Test_Max_Same_Values) { - const int count_rows = 10; - const int count_columns = 100; - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - global_matrix = getRandomMatrix(count_rows, count_columns, 20, 20); - std::random_device dev; - std::mt19937 gen(dev()); - int index = gen() % (count_rows * count_columns); - global_matrix[index / count_columns][index % count_columns] = INT_MAX; - - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - 
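- // Every element is 20 except the injected INT_MAX, so the expected maximum is unambiguous.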
taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - TestSequentialTask testSequentialTask(taskDataSeq); - ASSERT_EQ(testSequentialTask.validation(), true); - testSequentialTask.pre_processing(); - testSequentialTask.run(); - testSequentialTask.post_processing(); - - ASSERT_EQ(global_max[0], INT_MAX); -} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp deleted file mode 100644 index ffb8c534b3e..00000000000 --- a/tasks/seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once -#include - -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace kovalchuk_a_max_of_vector_elements_seq { - -const int MINIMALGEN = -99; -const int MAXIMUMGEN = 99; - -class TestSequentialTask : public ppc::core::Task { - public: - explicit TestSequentialTask(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector> input_; - int res_{}; -}; - -} // namespace kovalchuk_a_max_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index 0650d9210fe..00000000000 --- a/tasks/seq/kovalchuk_a_max_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" - -using namespace kovalchuk_a_max_of_vector_elements_seq; - -std::vector getRandomVector(int sz, int min = MINIMALGEN, int max = MAXIMUMGEN); -std::vector> getRandomMatrix(int rows, int columns, int min = MINIMALGEN, int max = MAXIMUMGEN); - -std::vector getRandomVector(int sz, int min, int max) { - std::random_device dev; - std::mt19937 gen(dev()); - std::vector vec(sz); - for (int i = 0; i < sz; i++) { - vec[i] = min + gen() % (max - min + 1); - } - return vec; -} - -std::vector> getRandomMatrix(int rows, int columns, int min, int max) { - std::vector> vec(rows); - for (int i = 0; i < rows; i++) { - vec[i] = getRandomVector(columns, min, max); - } - return vec; -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, test_pipeline_run) { - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - int ref = INT_MAX; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - std::random_device dev; - std::mt19937 gen(dev()); - int count_rows = 9999; - int count_columns = 9999; - global_matrix = getRandomMatrix(count_rows, count_columns); - size_t index = gen() % (static_cast(count_rows) * count_columns); - global_matrix[index / count_columns][index % count_columns] = ref; - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - auto testSequentialTask = std::make_shared(taskDataSeq); - 
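- // Validate and execute the task once before timing; pipeline_run below re-runs it
- // num_running times to collect the perf statistics.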
ASSERT_EQ(testSequentialTask->validation(), true); - testSequentialTask->pre_processing(); - testSequentialTask->run(); - testSequentialTask->post_processing(); - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - return std::chrono::duration_cast>(std::chrono::high_resolution_clock::now() - - start_time) - .count(); - }; - // Create and init perf results - auto perfResults = std::make_shared(); - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testSequentialTask); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ref, global_max[0]); -} - -TEST(kovalchuk_a_max_of_vector_elements_seq, test_task_run) { - std::vector> global_matrix; - std::vector global_max(1, INT_MIN); - int ref = INT_MAX; - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - std::random_device dev; - std::mt19937 gen(dev()); - int count_rows = 3; - int count_columns = 3; - global_matrix = getRandomMatrix(count_rows, count_columns); - size_t index = gen() % (static_cast(count_rows) * count_columns); - global_matrix[index / count_columns][index % count_columns] = ref; - for (unsigned int i = 0; i < global_matrix.size(); i++) - taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); - taskDataSeq->inputs_count.emplace_back(count_rows); - taskDataSeq->inputs_count.emplace_back(count_columns); - taskDataSeq->outputs.emplace_back(reinterpret_cast(global_max.data())); - taskDataSeq->outputs_count.emplace_back(global_max.size()); - // Create Task - auto testSequentialTask = std::make_shared(taskDataSeq); - ASSERT_EQ(testSequentialTask->validation(), true); - testSequentialTask->pre_processing(); - testSequentialTask->run(); - testSequentialTask->post_processing(); - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - return std::chrono::duration_cast>(std::chrono::high_resolution_clock::now() - - start_time) - .count(); - }; - // Create and init perf results - auto perfResults = std::make_shared(); - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testSequentialTask); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(ref, global_max[0]); -} \ No newline at end of file diff --git a/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp b/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp deleted file mode 100644 index d2f8bbcd13a..00000000000 --- a/tasks/seq/kovalchuk_a_max_of_vector_elements/src/ops_seq.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include "seq/kovalchuk_a_max_of_vector_elements/include/ops_seq.hpp" - -#include -#include -#include -#include -#include - -bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::pre_processing() { - internal_order_test(); - // Init vectors - if (taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0) { - input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); - std::copy(tmp_ptr, tmp_ptr + 
taskData->inputs_count[1], input_[i].begin()); - } - } else { - input_ = std::vector>(); - } - // Init value for output - res_ = INT_MIN; - return true; -} - -bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::validation() { - internal_order_test(); - // Check count elements of output - return taskData->outputs_count[0] == 1; -} - -bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::run() { - internal_order_test(); - if (!input_.empty()) { - std::vector local_res(input_.size()); - for (unsigned int i = 0; i < input_.size(); i++) { - if (!input_[i].empty()) { - local_res[i] = *std::max_element(input_[i].begin(), input_[i].end()); - } else { - local_res[i] = INT_MIN; - } - } - res_ = *std::max_element(local_res.begin(), local_res.end()); - } else { - res_ = INT_MIN; - } - return true; -} - -bool kovalchuk_a_max_of_vector_elements_seq::TestSequentialTask::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res_; - return true; -} \ No newline at end of file From 53eea66190c8ce4cbe1e3be779bf2b6b4e26fd1c Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 7 Nov 2024 19:07:20 +0100 Subject: [PATCH 144/155] =?UTF-8?q?Revert=20"=D0=A6=D0=B5=D0=BB=D0=B8?= =?UTF-8?q?=D0=BA=D0=BE=D0=B2=D0=B0=20=D0=90=D1=80=D0=B8=D0=BD=D0=B0.=20?= =?UTF-8?q?=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80?= =?UTF-8?q?=D0=B8=D0=B0=D0=BD=D1=82=202.=20=D0=92=D1=8B=D1=87=D0=B8=D1=81?= =?UTF-8?q?=D0=BB=D0=B5=D0=BD=D0=B8=D0=B5=20=D1=81=D1=80=D0=B5=D0=B4=D0=BD?= =?UTF-8?q?=D0=B5=D0=B3=D0=BE=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8?= =?UTF-8?q?=D1=8F=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2?= =?UTF-8?q?=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0."=20(#256)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#99 @ArinaTs https://github.com/learning-process/ppc-2024-autumn/actions/runs/11727331876/job/32668145199 --- .../func_tests/main.cpp | 51 ----- .../include/ops_mpi.hpp | 44 ---- .../perf_tests/main.cpp | 49 ----- .../src/ops_mpi.cpp | 107 ---------- .../func_tests/main.cpp | 193 ------------------ .../include/ops_seq.hpp | 23 --- .../perf_tests/main.cpp | 81 -------- .../src/ops_seq.cpp | 40 ---- 8 files changed, 588 deletions(-) delete mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp delete mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp delete mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp delete mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp delete mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp delete mode 100644 tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp deleted file mode 100644 index b3693b83a48..00000000000 --- a/tasks/mpi/tselikova_a_average_of_vector_elements/func_tests/main.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2024 Tselikova Arina -#include - -#include -#include -#include - -#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp" - -TEST(tselikova_a_average_of_vector_elements_mpi, 
Test_Average_Vector) { - boost::mpi::communicator world; - std::vector large_vec(1000, 1); - std::vector global_avg{0.0f}; - - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(large_vec.data())); - taskDataPar->inputs_count.emplace_back(large_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_avg.data())); - taskDataPar->outputs_count.emplace_back(global_avg.size()); - } - - tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), true); - testMpiTaskParallel.pre_processing(); - testMpiTaskParallel.run(); - testMpiTaskParallel.post_processing(); - - if (world.rank() == 0) { - float reference_avg = 1.0f; - - ASSERT_FLOAT_EQ(global_avg[0], reference_avg); - } -} - -TEST(tselikova_a_average_of_vector_elements_mpi, Test_EmptyVector) { - boost::mpi::communicator world; - std::vector empty_vec; - std::vector global_avg{0.0f}; - - std::shared_ptr taskDataPar = std::make_shared(); - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(empty_vec.data())); - taskDataPar->inputs_count.emplace_back(empty_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_avg.data())); - taskDataPar->outputs_count.emplace_back(global_avg.size()); - - tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); - ASSERT_EQ(testMpiTaskParallel.validation(), false); - } -} \ No newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp deleted file mode 100644 index a6f2b0b2bde..00000000000 --- a/tasks/mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#pragma once - -#include - -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace tselikova_a_average_of_vector_elements_mpi { - -class TestMPITaskSequential : public ppc::core::Task { - public: - explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_; - int res{}; -}; - -class TestMPITaskParallel : public ppc::core::Task { - public: - explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_, local_input_; - int res{}; - int sum_; - boost::mpi::communicator world; - int total_elements{}; -}; - -} // namespace tselikova_a_average_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index f07619f5078..00000000000 --- a/tasks/mpi/tselikova_a_average_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2024 Tselikova Arina -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp" - -TEST(mpi_example_perf_test1, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_vec; - std::vector 
global_avg(1, 0.0f); - // Create TaskData - std::shared_ptr taskDataPar = std::make_shared(); - int count_size_vector; - if (world.rank() == 0) { - count_size_vector = 10; - global_vec = std::vector(count_size_vector, 1); - taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); - taskDataPar->inputs_count.emplace_back(global_vec.size()); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_avg.data())); - taskDataPar->outputs_count.emplace_back(global_avg.size()); - } - - auto testMpiTaskParallel = - std::make_shared(taskDataPar); - ASSERT_EQ(testMpiTaskParallel->validation(), true); - testMpiTaskParallel->pre_processing(); - testMpiTaskParallel->run(); - testMpiTaskParallel->post_processing(); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testMpiTaskParallel); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_FLOAT_EQ(1.0f, global_avg[0]); - } -} \ No newline at end of file diff --git a/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp deleted file mode 100644 index e4785c04cee..00000000000 --- a/tasks/mpi/tselikova_a_average_of_vector_elements/src/ops_mpi.cpp +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2024 Tselikova Arina -#include "mpi/tselikova_a_average_of_vector_elements/include/ops_mpi.hpp" - -#include -#include -#include -#include -#include - -using namespace std::chrono_literals; - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::pre_processing() { - internal_order_test(); - int* tmp = reinterpret_cast(taskData->inputs[0]); - input_ = std::vector(taskData->inputs_count[0]); - for (std::size_t i = 0; i < static_cast(taskData->inputs_count[0]); i++) { - input_[i] = tmp[i]; - } - res = 0; - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] >= 1 && taskData->outputs_count[0] == 1; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::run() { - internal_order_test(); - int sum = 0; - for (std::size_t i = 0; i < input_.size(); i++) { - sum += input_[i]; - } - res = static_cast(sum) / input_.size(); - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskSequential::post_processing() { - internal_order_test(); - std::cout << res << std::endl; - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::pre_processing() { - internal_order_test(); - - if (world.rank() == 0) { - input_ = std::vector(taskData->inputs_count[0]); - auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); - for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { - input_[i] = tmp_ptr[i]; - } - } - - res = 0; - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::validation() { - internal_order_test(); - if (world.rank() == 0) { - return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] >= 1; - } - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::run() { - 
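- // Distribution scheme: rank 0 broadcasts the chunk size, then sends each remote
- // rank a contiguous slice; the last rank also receives the remainder. Every rank
- // sums its slice locally and reduce() combines the partial sums on rank 0.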
internal_order_test(); - - unsigned int delta = 0; - if (world.rank() == 0) { - delta = taskData->inputs_count[0] / world.size(); - total_elements = taskData->inputs_count[0]; - } - broadcast(world, delta, 0); - broadcast(world, total_elements, 0); - if (world.rank() == 0) { - for (int proc = 1; proc < world.size(); proc++) { - unsigned int start_index = proc * delta; - unsigned int count = (proc == world.size() - 1) ? (total_elements - start_index) : delta; - world.send(proc, 0, input_.data() + start_index, count); - } - } - local_input_ = std::vector(delta); - if (world.rank() == 0) { - local_input_ = std::vector(input_.begin(), input_.begin() + delta); - } else { - world.recv(0, 0, local_input_.data(), delta); - } - - int local_sum = 0; - for (unsigned int i = 0; i < local_input_.size(); i++) { - local_sum += local_input_[i]; - } - reduce(world, local_sum, sum_, std::plus<>(), 0); - return true; -} - -bool tselikova_a_average_of_vector_elements_mpi::TestMPITaskParallel::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - res = static_cast(sum_) / total_elements; - reinterpret_cast(taskData->outputs[0])[0] = res; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp deleted file mode 100644 index 3cb5f2a372d..00000000000 --- a/tasks/seq/tselikova_a_average_of_vector_elements/func_tests/main.cpp +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2024 Tselikova Arina -#include - -#include - -#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp" - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_similar_elem) { - const int count = 3; - - // Create data - std::vector in(10, count); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(count, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_integer_value) { - // Create data - std::vector in{2, 4, 6, 8, 10}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(6, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_empty_vector) { - // Create data - std::vector in(0); - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - 
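- // The input vector is deliberately empty; validation() below is expected to reject it.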
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), false); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_one_elem) { - // Create data - std::vector in{5}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(5, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_two_elem) { - // Create data - std::vector in{2, 6}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(4, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_three_elem) { - // Create data - std::vector in{1, 2, 3}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(2, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_fraction_value) { - // Create data - std::vector in{7, 9, 13}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_NEAR(9.67, out[0], 0.1); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_negative_elem) { - // Create data - std::vector in{-2, -4, -6, -8, -10}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr 
taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(-6, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, check_vector_with_zero_av) { - // Create data - std::vector in{2, -2}; - std::vector out(1, 0); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - tselikova_a_average_of_vector_elements::TestTaskSequential testTaskSequential(taskDataSeq); - ASSERT_EQ(testTaskSequential.validation(), true); - testTaskSequential.pre_processing(); - testTaskSequential.run(); - testTaskSequential.post_processing(); - ASSERT_EQ(0, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp b/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp deleted file mode 100644 index 88145f8e4bc..00000000000 --- a/tasks/seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2024 Tselikova Arina -#pragma once - -#include - -#include "core/task/include/task.hpp" - -namespace tselikova_a_average_of_vector_elements { - -class TestTaskSequential : public ppc::core::Task { - public: - explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - std::vector input_{}; - float res{}; -}; - -} // namespace tselikova_a_average_of_vector_elements \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp deleted file mode 100644 index 08111defc44..00000000000 --- a/tasks/seq/tselikova_a_average_of_vector_elements/perf_tests/main.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2023 Nesterov Alexander -#include - -#include - -#include "core/perf/include/perf.hpp" -#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp" - -TEST(tselikova_a_average_of_vector_elements, test_pipeline_run) { - const int count = 100; - - // Create data - std::vector in(1, count); - std::vector out(1, 0.0f); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = 
std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_FLOAT_EQ(count, out[0]); -} - -TEST(tselikova_a_average_of_vector_elements, test_task_run) { - const int count = 100; - - // Create data - std::vector in(1, count); - std::vector out(1, 0.0f); - - // Create TaskData - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); - taskDataSeq->inputs_count.emplace_back(in.size()); - taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); - taskDataSeq->outputs_count.emplace_back(out.size()); - - // Create Task - auto testTaskSequential = std::make_shared(taskDataSeq); - - // Create Perf attributes - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - // Create and init perf results - auto perfResults = std::make_shared(); - - // Create Perf analyzer - auto perfAnalyzer = std::make_shared(testTaskSequential); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - ASSERT_EQ(count, out[0]); -} \ No newline at end of file diff --git a/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp b/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp deleted file mode 100644 index d3ad1299fec..00000000000 --- a/tasks/seq/tselikova_a_average_of_vector_elements/src/ops_seq.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2024 Tselikova Arina -#include "seq/tselikova_a_average_of_vector_elements/include/ops_seq.hpp" - -#include -#include -#include - -using namespace std::chrono_literals; - -bool tselikova_a_average_of_vector_elements::TestTaskSequential::pre_processing() { - internal_order_test(); - int* tmp = reinterpret_cast(taskData->inputs[0]); - input_ = std::vector(taskData->inputs_count[0]); - for (std::size_t i = 0; i < (std::size_t)taskData->inputs_count[0]; i++) { - input_[i] = tmp[i]; - } - res = 0; - return true; -} - -bool tselikova_a_average_of_vector_elements::TestTaskSequential::validation() { - internal_order_test(); - return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; -} - -bool tselikova_a_average_of_vector_elements::TestTaskSequential::run() { - internal_order_test(); - int sum = 0; - for (std::size_t i = 0; i < input_.size(); i++) { - sum += input_[i]; - } - res = static_cast(sum) / input_.size(); - return true; -} - -bool tselikova_a_average_of_vector_elements::TestTaskSequential::post_processing() { - internal_order_test(); - reinterpret_cast(taskData->outputs[0])[0] = res; - return true; -} From b820b9902b396d76e0a0422e5bc4a450f34a4866 Mon Sep 17 00:00:00 2001 From: DSFKnight <125854188+DSFKnight@users.noreply.github.com> Date: Thu, 7 Nov 2024 21:37:38 +0300 Subject: [PATCH 145/155] =?UTF-8?q?=D0=93=D1=80=D0=BE=D0=BC=D0=BE=D0=B2=20?= =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B5=D0=B9.=20=D0=97=D0=B0?= 
=?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=201.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D1=8D?= =?UTF-8?q?=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5?= =?UTF-8?q?=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#158)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Последовательная задача (seq)**: Последовательная задача выполняется в классе `SumOfVector`, где элементы входного вектора суммируются с помощью простого цикла. Перед запуском происходит валидация данных и их предварительная обработка. В процессе вычисления каждый элемент вектора добавляется к общей сумме, которая затем сохраняется в выходных данных. **Параллельная задача (mpi)**: В классе `MPISumOfVectorParallel` входной вектор делится на части между процессами. Каждый процесс выполняет вычисления на своей части данных с использованием `std::accumulate` или других функций, таких как `std::max_element` и `std::min_element`, в зависимости от типа операции. После локальных вычислений результаты собираются с помощью MPI-функции `reduce`. --- .../func_tests/main.cpp | 323 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 +++ .../perf_tests/main.cpp | 89 +++++ .../src/ops_mpi.cpp | 119 +++++++ .../func_tests/main.cpp | 224 ++++++++++++ .../include/ops_seq.hpp | 22 ++ .../perf_tests/main.cpp | 80 +++++ .../src/ops_seq.cpp | 30 ++ 8 files changed, 934 insertions(+) create mode 100644 tasks/mpi/gromov_a_sum_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp create mode 100644 tasks/mpi/gromov_a_sum_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/mpi/gromov_a_sum_of_vector_elements/src/ops_mpi.cpp create mode 100644 tasks/seq/gromov_a_sum_of_vector_elements/func_tests/main.cpp create mode 100644 tasks/seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp create mode 100644 tasks/seq/gromov_a_sum_of_vector_elements/perf_tests/main.cpp create mode 100644 tasks/seq/gromov_a_sum_of_vector_elements/src/ops_seq.cpp diff --git a/tasks/mpi/gromov_a_sum_of_vector_elements/func_tests/main.cpp b/tasks/mpi/gromov_a_sum_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..df15f4d8178 --- /dev/null +++ b/tasks/mpi/gromov_a_sum_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,323 @@ +#include + +#include +#include +#include +#include + +#include "mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp" + +namespace gromov_a_sum_of_vector_elements_mpi { +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_int_distribution dist(-100, 100); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} +} // namespace gromov_a_sum_of_vector_elements_mpi + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Min1) { + boost::mpi::communicator world; + std::vector global_vec = {-10, -20, 0, 15, -30}; + std::vector global_min(1, std::numeric_limits::max()); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "min"); + 
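+ // Full task lifecycle follows: validation() checks the output buffer count, run()
+ // scatters the vector, takes a per-rank local minimum, and reduces onto rank 0.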
ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, std::numeric_limits::max()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "min"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Min2) { + boost::mpi::communicator world; + std::vector global_vec = {-10, -17, 1, 19, 28}; + std::vector global_min(1, std::numeric_limits::max()); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "min"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, std::numeric_limits::max()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "min"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Min3) { + boost::mpi::communicator world; + std::vector global_vec = {-10, -20, 0, -30, 15}; + std::vector global_min(1, std::numeric_limits::max()); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "min"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if 
(world.rank() == 0) { + // Create data + std::vector reference_min(1, std::numeric_limits::max()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "min"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Max1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, std::numeric_limits::min()); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 250; + global_vec = gromov_a_sum_of_vector_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "max"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, std::numeric_limits::min()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "max"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Max2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, std::numeric_limits::min()); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 200; + global_vec = gromov_a_sum_of_vector_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "max"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + 
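+ // post_processing() copies the reduced maximum into the output buffer on rank 0.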
MPISumOfVectorParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, std::numeric_limits::min()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "max"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Addition1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_add(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 200; + global_vec = gromov_a_sum_of_vector_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_add.data())); + taskDataPar->outputs_count.emplace_back(global_add.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "add"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_add(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_add.data())); + taskDataSeq->outputs_count.emplace_back(reference_add.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "add"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_add[0], global_add[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, Test_Addition2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_add(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 500; + global_vec = gromov_a_sum_of_vector_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_add.data())); + taskDataPar->outputs_count.emplace_back(global_add.size()); + } + + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel MPISumOfVectorParallel(taskDataPar, "add"); + ASSERT_EQ(MPISumOfVectorParallel.validation(), true); + MPISumOfVectorParallel.pre_processing(); + MPISumOfVectorParallel.run(); + MPISumOfVectorParallel.post_processing(); + + if 
(world.rank() == 0) { + // Create data + std::vector reference_add(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_add.data())); + taskDataSeq->outputs_count.emplace_back(reference_add.size()); + + // Create Task + gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential MPISumOfVectorSequential(taskDataSeq, "add"); + ASSERT_EQ(MPISumOfVectorSequential.validation(), true); + MPISumOfVectorSequential.pre_processing(); + MPISumOfVectorSequential.run(); + MPISumOfVectorSequential.post_processing(); + + ASSERT_EQ(reference_add[0], global_add[0]); + } +} diff --git a/tasks/mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..674ba4b01e3 --- /dev/null +++ b/tasks/mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gromov_a_sum_of_vector_elements_mpi { + +class MPISumOfVectorSequential : public ppc::core::Task { + public: + explicit MPISumOfVectorSequential(std::shared_ptr taskData_, std::string ops_) + : Task(std::move(taskData_)), ops(std::move(ops_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops; +}; + +class MPISumOfVectorParallel : public ppc::core::Task { + public: + explicit MPISumOfVectorParallel(std::shared_ptr taskData_, std::string ops_) + : Task(std::move(taskData_)), ops(std::move(ops_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace gromov_a_sum_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/gromov_a_sum_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/gromov_a_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..ed0ebc4e858 --- /dev/null +++ b/tasks/mpi/gromov_a_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp" + +TEST(gromov_a_sum_of_vector_elements_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 50000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPISumOfVectorParallel = + std::make_shared(taskDataPar, "add"); + ASSERT_EQ(MPISumOfVectorParallel->validation(), true); + MPISumOfVectorParallel->pre_processing(); + MPISumOfVectorParallel->run(); + MPISumOfVectorParallel->post_processing(); + + // Create Perf attributes + auto 
perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPISumOfVectorParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} + +TEST(gromov_a_sum_of_vector_elements_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 50000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPISumOfVectorParallel = + std::make_shared(taskDataPar, "add"); + ASSERT_EQ(MPISumOfVectorParallel->validation(), true); + MPISumOfVectorParallel->pre_processing(); + MPISumOfVectorParallel->run(); + MPISumOfVectorParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPISumOfVectorParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} diff --git a/tasks/mpi/gromov_a_sum_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/gromov_a_sum_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..0a7fe28f740 --- /dev/null +++ b/tasks/mpi/gromov_a_sum_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,119 @@ +#include "mpi/gromov_a_sum_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + // Init value for output + res = 0; + return true; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential::run() { + internal_order_test(); + if (ops == "add") { + res = std::accumulate(input_.begin(), input_.end(), 0); + } else if (ops == "max") { + res = *std::max_element(input_.begin(), input_.end()); + } else if (ops == "min") { + res = *std::min_element(input_.begin(), input_.end()); + } + return true; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool 
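
// [Editor's sketch, not part of this patch] The sequential run() above
// accumulates into an int. That is safe for the sizes these tests use
// (50'000'000 ones sum to 5e7 < INT_MAX), but ~5e7 random values in [-49, 50]
// could exceed INT_MAX in the worst case. A 64-bit accumulator is a cheap
// safeguard; `sum64` is a hypothetical helper name.

#include <cstdint>
#include <numeric>
#include <vector>

int64_t sum64(const std::vector<int>& v) {
  // The int64_t{0} initial value makes std::accumulate widen every addition.
  return std::accumulate(v.begin(), v.end(), int64_t{0});
}
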
gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel::pre_processing() { + internal_order_test(); + res = 0; + return true; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel::run() { + internal_order_test(); + unsigned int delta = 0; + unsigned int alpha = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + alpha = taskData->inputs_count[0] % world.size(); + } + broadcast(world, delta, 0); + broadcast(world, alpha, 0); + + if (world.rank() == 0) { + input_.assign(reinterpret_cast(taskData->inputs[0]), + reinterpret_cast(taskData->inputs[0]) + taskData->inputs_count[0]); + for (int proc = 1; proc < world.size(); ++proc) { + unsigned int send_size = (proc == world.size() - 1) ? delta + alpha : delta; + world.send(proc, 0, input_.data() + proc * delta, send_size); + } + } + + unsigned int local_size = (world.rank() == world.size() - 1) ? delta + alpha : delta; + local_input_.resize(local_size); + + if (world.rank() != 0) { + world.recv(0, 0, local_input_.data(), local_size); + } else { + std::copy(input_.begin(), input_.begin() + delta, local_input_.begin()); + } + + int local_res = 0; + if (ops == "add") { + local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); + } else if (ops == "max") { + local_res = *std::max_element(local_input_.begin(), local_input_.end()); + } else if (ops == "min") { + local_res = *std::min_element(local_input_.begin(), local_input_.end()); + } + + if (ops == "add") { + reduce(world, local_res, res, std::plus(), 0); + } else if (ops == "max") { + reduce(world, local_res, res, boost::mpi::maximum(), 0); + } else if (ops == "min") { + reduce(world, local_res, res, boost::mpi::minimum(), 0); + if (world.rank() == 0) { + if (input_.back() < res) { + res = input_.back(); + } + } + } + return true; +} + +bool gromov_a_sum_of_vector_elements_mpi::MPISumOfVectorParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/gromov_a_sum_of_vector_elements/func_tests/main.cpp b/tasks/seq/gromov_a_sum_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..7a465d34466 --- /dev/null +++ b/tasks/seq/gromov_a_sum_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,224 @@ +#include + +#include + +#include "seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp" + +TEST(gromov_a_sum_of_vector_elements_seq, Test_Sum_30) { + const int count = 30; + + // Create data + std::vector in(1, count); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_Max_Element) { + std::vector in = {4, 1, 3, 2, 5}; + std::vector out(1, 0); + + // Create TaskData + 
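
// [Editor's sketch, not part of this patch] The parallel run() above hands out
// blocks with explicit send/recv pairs, giving the remainder to the last rank.
// Boost.MPI's scatterv collective expresses the same block distribution
// declaratively; the overloads used here are my reading of
// <boost/mpi/collectives.hpp> and should be checked against the installed
// Boost version. `distribute_block` is a hypothetical helper name.

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <vector>

std::vector<int> distribute_block(const boost::mpi::communicator& world, const std::vector<int>& data) {
  int size = world.size();
  std::vector<int> sizes(size);
  std::vector<int> displs(size);
  if (world.rank() == 0) {
    int delta = static_cast<int>(data.size()) / size;
    int alpha = static_cast<int>(data.size()) % size;
    for (int p = 0; p < size; ++p) {
      sizes[p] = delta + (p == size - 1 ? alpha : 0);  // last rank absorbs the remainder
      displs[p] = p * delta;
    }
  }
  // Every rank needs its own receive size; broadcast the layout computed on root.
  boost::mpi::broadcast(world, sizes, 0);
  std::vector<int> local(sizes[world.rank()]);
  if (world.rank() == 0) {
    boost::mpi::scatterv(world, data.data(), sizes, displs, local.data(), sizes[0], 0);
  } else {
    boost::mpi::scatterv(world, local.data(), sizes[world.rank()], 0);
  }
  return local;
}
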
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(5, *std::max_element(in.begin(), in.end())); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_Min_Element) { + std::vector in = {1, 3, 5, 2, 4}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(1, *std::min_element(in.begin(), in.end())); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_SumTwoElements) { + const int a = 2; + const int b = 3; + std::vector in = {a, b}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(a + b, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_BigVector) { + const int count = 10000; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_Sum_70) { + const int count = 70; + + // Create data + std::vector in(1, count); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_Max2_Element) { + std::vector in = {9, 
15, 43, 22, 11}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(43, *std::max_element(in.begin(), in.end())); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_BigVector2) { + const int count = 30000; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_SumTwoElements2) { + const int a = 105; + const int b = 113; + std::vector in = {a, b}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(a + b, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, Test_BigVector3) { + const int count = 500000; + + // Create data + std::vector in(count, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + gromov_a_sum_of_vector_elements_seq::SumOfVector sumOfVector(taskDataSeq); + ASSERT_EQ(sumOfVector.validation(), true); + sumOfVector.pre_processing(); + sumOfVector.run(); + sumOfVector.post_processing(); + ASSERT_EQ(count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp b/tasks/seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..857c518e6be --- /dev/null +++ b/tasks/seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace gromov_a_sum_of_vector_elements_seq { + +class SumOfVector : public ppc::core::Task { + public: + explicit SumOfVector(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; 
+}; + +} // namespace gromov_a_sum_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/gromov_a_sum_of_vector_elements/perf_tests/main.cpp b/tasks/seq/gromov_a_sum_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..e30c45c4936 --- /dev/null +++ b/tasks/seq/gromov_a_sum_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,80 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp" + +TEST(gromov_a_sum_of_vector_elements_seq, test_pipeline_run) { + const int count = 50000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto sumOfVector = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumOfVector); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(gromov_a_sum_of_vector_elements_seq, test_task_run) { + const int count = 50000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto sumOfVector = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumOfVector); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/gromov_a_sum_of_vector_elements/src/ops_seq.cpp b/tasks/seq/gromov_a_sum_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..1de4977897b --- /dev/null +++ b/tasks/seq/gromov_a_sum_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,30 @@ +#include "seq/gromov_a_sum_of_vector_elements/include/ops_seq.hpp" + +bool gromov_a_sum_of_vector_elements_seq::SumOfVector::pre_processing() { + internal_order_test(); + // Init value for input and output + res = 0; + return true; +} + +bool gromov_a_sum_of_vector_elements_seq::SumOfVector::validation() { + 
internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1; +} + +bool gromov_a_sum_of_vector_elements_seq::SumOfVector::run() { + internal_order_test(); + int* inputPtr = reinterpret_cast(taskData->inputs[0]); + int count = taskData->inputs_count[0]; + for (int i = 0; i < count; ++i) { + res += inputPtr[i]; + } + return true; +} + +bool gromov_a_sum_of_vector_elements_seq::SumOfVector::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From 214f906d145bda69567e53dcac334a2503b81322 Mon Sep 17 00:00:00 2001 From: Tarakanov Denis <126470016+Chiks37@users.noreply.github.com> Date: Fri, 8 Nov 2024 02:35:20 +0300 Subject: [PATCH 146/155] =?UTF-8?q?=D0=A2=D0=B0=D1=80=D0=B0=D0=BA=D0=B0?= =?UTF-8?q?=D0=BD=D0=BE=D0=B2=20=D0=94=D0=B5=D0=BD=D0=B8=D1=81.=20=D0=97?= =?UTF-8?q?=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B0=D0=BD=D1=82=2020.=20=D0=98=D0=BD=D1=82=D0=B5=D0=B3=D1=80?= =?UTF-8?q?=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20-=20=D0=BC?= =?UTF-8?q?=D0=B5=D1=82=D0=BE=D0=B4=20=D1=82=D1=80=D0=B0=D0=BF=D0=B5=D1=86?= =?UTF-8?q?=D0=B8=D0=B9.=20(#161)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 194 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 +++++ .../perf_tests/main.cpp | 96 +++++++++ .../src/ops_mpi.cpp | 109 ++++++++++ .../func_tests/main.cpp | 66 ++++++ .../include/ops_seq.hpp | 25 +++ .../perf_tests/main.cpp | 80 ++++++++ .../src/ops_seq.cpp | 44 ++++ 8 files changed, 662 insertions(+) create mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp create mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp create mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp create mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp create mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp create mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp create mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp create mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp new file mode 100644 index 00000000000..2b8f93a7ed7 --- /dev/null +++ b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp @@ -0,0 +1,194 @@ +// Copyright 2024 Tarakanov Denis +#include + +#include +#include +#include +#include + +#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration1) { + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + double a = 0.0; + double b = 1.0; + double h = 1e-8; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + 
tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); + + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); + taskDataSeq->inputs_count.emplace_back(3); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_res[0], global_res[0], 0.1); + } +} + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration2) { + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + double a = 5.0; + double b = 7.0; + double h = 1e-8; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); + + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); + taskDataSeq->inputs_count.emplace_back(3); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_res[0], global_res[0], 0.1); + } +} + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration3) { + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + double a = -2.0; + double b = -1.0; + double h = 1e-8; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); + + ASSERT_EQ(parallelTask.validation(), true); + 
parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); + taskDataSeq->inputs_count.emplace_back(3); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_res[0], global_res[0], 0.1); + } +} + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration_random_data) { + std::srand(static_cast(std::time(nullptr))); + + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + double a = std::rand() % (100); + double b = a + std::rand() % (5); + double h = 1e-8; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); + + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_res(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); + taskDataSeq->inputs_count.emplace_back(3); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(1); + + tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_res[0], global_res[0], 0.1); + } +} \ No newline at end of file diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp new file mode 100644 index 00000000000..3ecfc7e0c44 --- /dev/null +++ b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2024 Tarakanov Denis +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace tarakanov_d_integration_the_trapezoid_method_mpi { + +class integration_the_trapezoid_method_seq : public ppc::core::Task { + public: + explicit integration_the_trapezoid_method_seq(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + 
double a{}, b{}, h{}, res{}; + + static double f(double x) { return x * x; }; +}; + +class integration_the_trapezoid_method_par : public ppc::core::Task { + public: + explicit integration_the_trapezoid_method_par(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double res{}; + + double a{}, b{}, h{}, local_a{}; + uint32_t partsCount{}, localPartsCount{}; + + static double f(double x) { return x * x; }; + + boost::mpi::communicator world; +}; + +} // namespace tarakanov_d_integration_the_trapezoid_method_mpi \ No newline at end of file diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..a8b29e4a3ba --- /dev/null +++ b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,96 @@ +// Copyright 2024 Tarakanov Denis +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_perf_tests, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + double a = 0.0; + double b = 1.0; + double h = 1e-8; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto parallelTask = + std::make_shared( + taskDataPar); + + ASSERT_EQ(parallelTask->validation(), true); + parallelTask->pre_processing(); + parallelTask->run(); + parallelTask->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(parallelTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_value = 0.335; + ASSERT_NEAR(expected_value, global_res[0], 0.1); + } +} + +TEST(tarakanov_d_integration_the_trapezoid_method_mpi_perf_tests, test_task_run) { + boost::mpi::communicator world; + std::vector global_res(1, 0.0); + + double a = 0.0; + double b = 1.0; + double h = 1e-8; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); + taskDataPar->inputs_count.emplace_back(3); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(1); + } + + auto parallelTask = + std::make_shared( + taskDataPar); + + ASSERT_EQ(parallelTask->validation(), true); + parallelTask->pre_processing(); + parallelTask->run(); + parallelTask->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); 
};
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(parallelTask);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    double expected_value = 0.335;
+    ASSERT_NEAR(expected_value, global_res[0], 0.1);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp
new file mode 100644
index 00000000000..707abc36295
--- /dev/null
+++ b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp
@@ -0,0 +1,109 @@
+// Copyright 2024 Tarakanov Denis
+#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <functional>
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::pre_processing() {
+  internal_order_test();
+
+  // Init values for input and output
+  a = *reinterpret_cast<double*>(taskData->inputs[0]);
+  b = *reinterpret_cast<double*>(taskData->inputs[1]);
+  h = *reinterpret_cast<double*>(taskData->inputs[2]);
+  res = 0;
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::validation() {
+  internal_order_test();
+
+  // Check the number of inputs and outputs
+  return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::run() {
+  internal_order_test();
+
+  int n = static_cast<int>((b - a) / h);
+  double integral = 0.0;
+
+  // summing trapezoid areas; start at i = 0 so the first subinterval [a, a + h] is included
+  for (int i = 0; i < n; ++i) {
+    double x0 = a + i * h;              // left trapezoid edge
+    double x1 = a + (i + 1) * h;        // right trapezoid edge
+    integral += 0.5 * (f(x0) + f(x1));  // trapezoid area (scaled by h below)
+  }
+  integral *= h;
+
+  res = integral;
+
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::post_processing() {
+  internal_order_test();
+
+  *reinterpret_cast<double*>(taskData->outputs[0]) = res;
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::pre_processing() {
+  internal_order_test();
+
+  // Init values for input and output
+  if (world.rank() == 0) {
+    a = *reinterpret_cast<double*>(taskData->inputs[0]);
+    b = *reinterpret_cast<double*>(taskData->inputs[1]);
+    h = *reinterpret_cast<double*>(taskData->inputs[2]);
+    res = 0;
+  }
+
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::validation() {
+  internal_order_test();
+  // Check the number of inputs and outputs
+  if (world.rank() == 0) {
+    uint32_t tmp1 = taskData->inputs_count[0];
+    uint32_t tmp2 = taskData->outputs_count[0];
+    return tmp1 == 3 && tmp2 == 1;
+  }
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::run() {
+  internal_order_test();
+
+  boost::mpi::broadcast(world, a, 0);
+  boost::mpi::broadcast(world, b, 0);
+  boost::mpi::broadcast(world, h, 0);
+
+  partsCount = static_cast<uint32_t>((b - a) / h);
+  uint32_t base = partsCount / world.size();
+  uint32_t rem = partsCount % world.size();
+  uint32_t rank = static_cast<uint32_t>(world.rank());
+  localPartsCount = base + (rank < rem ? 1 : 0);
+
+  // The first `rem` ranks hold one extra part, so this rank's first part index
+  // is rank * base plus one for every lower rank that carries an extra part.
+  uint32_t firstPart = rank * base + std::min(rank, rem);
+  local_a = a + firstPart * h;
+
+  // Composite trapezoid rule over this rank's contiguous block of parts; each
+  // block already halves its own endpoints, so shared boundary nodes end up
+  // with full weight once the per-rank sums are reduced.
+  double local_res = 0.0;
+  for (unsigned int i = 0; i < localPartsCount; ++i) {
+    double x0 = local_a + i * h;         // left trapezoid edge
+    double x1 = local_a + (i + 1) * h;   // right trapezoid edge
+    local_res += 0.5 * (f(x0) + f(x1));  // trapezoid area (scaled by h below)
+  }
+  local_res *= h;
+
+  boost::mpi::reduce(world, local_res, res, std::plus<>(), 0);
+
+  return true;
+}
+
+bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<double*>(taskData->outputs[0]) = res;
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp
new file mode 100644
index 00000000000..9c19c2b1e6a
--- /dev/null
+++ b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp
@@ -0,0 +1,66 @@
+#include <gtest/gtest.h>
+
+#include <memory>
+
+#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp"
+
+using namespace tarakanov_d_integration_the_trapezoid_method_seq;
+
+auto createTaskData(double* a, double* b, double* h, double* res) {
+  auto data = std::make_shared<ppc::core::TaskData>();
+
+  data->inputs.push_back(reinterpret_cast<uint8_t*>(a));
+  data->inputs.push_back(reinterpret_cast<uint8_t*>(b));
+  data->inputs.push_back(reinterpret_cast<uint8_t*>(h));
+
+  data->inputs_count.push_back(3);
+
+  data->outputs.push_back(reinterpret_cast<uint8_t*>(res));
+  data->outputs_count.push_back(1);
+
+  return data;
+}
+
+TEST(tarakanov_d_integration_the_trapezoid_method_func_test, ValidationWorks) {
+  double a = 0.0;
+  double b = 1.0;
+  double h = 0.1;
+  double res = 0.0;
+  auto data = createTaskData(&a, &b, &h, &res);
+
+  integration_the_trapezoid_method task(data);
+
+  EXPECT_TRUE(task.validation());
+}
+
+TEST(tarakanov_d_integration_the_trapezoid_method_func_test, PreProcessingWorks) {
+  double a = 0.0;
+  double b = 1.0;
+  double h = 0.1;
+  double res = 0.0;
+  auto data = createTaskData(&a, &b, &h, &res);
+  integration_the_trapezoid_method task(data);
+
+  EXPECT_TRUE(task.validation());
+  EXPECT_TRUE(task.pre_processing());
+  EXPECT_EQ(task.get_data()->inputs_count[0], 3u);
+  EXPECT_EQ(task.get_data()->outputs_count[0], 1u);
+}
+
+TEST(tarakanov_d_integration_the_trapezoid_method_func_test, PostProcessingWorks) {
+  double a = 0.0;
+  double b = 1.0;
+  double h = 0.1;
+  double res = 0.0;
+  auto data = createTaskData(&a, &b, &h, &res);
+
+  integration_the_trapezoid_method task(data);
+  EXPECT_TRUE(task.validation());
+  EXPECT_TRUE(task.pre_processing());
+  EXPECT_TRUE(task.run());
+  EXPECT_TRUE(task.post_processing());
+
+  double output = *reinterpret_cast<double*>(data->outputs[0]);
+  bool flag = output == 0.0;
+  EXPECT_FALSE(flag);
+}
diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp
new file mode 100644
index 00000000000..465050c2b05
--- /dev/null
+++ b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp
@@ -0,0 +1,25 @@
+// Copyright 2024 Tarakanov Denis
+#pragma once
+
+#include <memory>
+
+#include "core/task/include/task.hpp"
+
+namespace tarakanov_d_integration_the_trapezoid_method_seq {
+
+class integration_the_trapezoid_method : public ppc::core::Task {
+ public:
+  explicit
integration_the_trapezoid_method(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + double a{}, b{}, h{}, res{}; + + static double f(double x) { return x * x; }; +}; + +} // namespace tarakanov_d_integration_the_trapezoid_method_seq \ No newline at end of file diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..d0c641be753 --- /dev/null +++ b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,80 @@ +// Copyright 2023 Tarakanov Denis +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp" + +using namespace tarakanov_d_integration_the_trapezoid_method_seq; + +TEST(trapezoid_method_perf_test, test_pipeline_run) { + double a = 0.0; + double b = 1.0; + double h = 0.1; + + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&h)); + taskData->inputs_count.push_back(3); + + double out = 0.0; + taskData->outputs.push_back(reinterpret_cast(&out)); + taskData->outputs_count.push_back(1); + + auto task = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(task); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + double expected_result = 0.335; + EXPECT_DOUBLE_EQ(out, expected_result); +} + +TEST(trapezoid_method_perf_test, test_task_run) { + double a = 0.0; + double b = 1.0; + double h = 0.1; + + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&h)); + taskData->inputs_count.push_back(3); + + double out = 0.0; + taskData->outputs.push_back(reinterpret_cast(&out)); + taskData->outputs_count.push_back(1); + + auto task = std::make_shared(taskData); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(task); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + double expected_result = 0.335; + EXPECT_DOUBLE_EQ(out, expected_result); +} \ No newline at end of file diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp new file mode 100644 index 00000000000..338db148881 --- /dev/null +++ 
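
// [Editor's sketch, not part of this patch] Hand-checking the
// expected_result = 0.335 asserted by the perf tests above. The composite
// trapezoid rule is T = h * (f(a)/2 + sum_{i=1}^{n-1} f(a + i*h) + f(b)/2).
// For f(x) = x*x on [0, 1] with h = 0.1 (n = 10):
//   sum_{i=1}^{9} (0.1 * i)^2 = 0.01 * (1 + 4 + ... + 81) = 0.01 * 285 = 2.85
//   T = 0.1 * (0/2 + 2.85 + 1/2) = 0.335.
// The exact integral is 1/3, so the rule overshoots by h^2/6 ~ 0.00167 here,
// consistent with the O(h^2) trapezoid error term.

double trapezoid_x_squared(double a, double b, double h) {
  int n = static_cast<int>((b - a) / h);
  double s = 0.5 * (a * a + b * b);  // endpoint halves of f(x) = x*x
  for (int i = 1; i < n; ++i) {
    double x = a + i * h;
    s += x * x;  // interior nodes carry full weight
  }
  return s * h;  // trapezoid_x_squared(0.0, 1.0, 0.1) == 0.335 up to rounding
}
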
b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp @@ -0,0 +1,44 @@ +// Copyright 2024 Tarakanov Denis +#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp" + +bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::pre_processing() { + internal_order_test(); + + // Init value for input and output + a = *reinterpret_cast(taskData->inputs[0]); + b = *reinterpret_cast(taskData->inputs[1]); + h = *reinterpret_cast(taskData->inputs[2]); + res = 0; + return true; +} + +bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::validation() { + internal_order_test(); + + // Check count elements of output + return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1; +} + +bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::run() { + internal_order_test(); + + int n = static_cast((b - a) / h); + double integral = 0.0; + + // summing trapezoid areas + for (int i = 0; i < n; ++i) { + double x0 = a + i * h; // left trapezoid edge + double x1 = a + (i + 1) * h; // right trapezoid edge + integral += 0.5 * (x0 * x0 + x1 * x1) * h; // trapezoid area + } + + res = integral; + + return true; +} + +bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = res; + return true; +} From 125d3ce1ab8df7ac2aa0b36bbae323c5e15dcc28 Mon Sep 17 00:00:00 2001 From: laganina-cod <120954602+laganina-cod@users.noreply.github.com> Date: Fri, 8 Nov 2024 02:36:29 +0300 Subject: [PATCH 147/155] =?UTF-8?q?=D0=9B=D0=B0=D0=B3=D0=B0=D0=BD=D0=B8?= =?UTF-8?q?=D0=BD=D0=B0=20=D0=95=D0=BB=D0=B5=D0=BD=D0=B0.=20=D0=97=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0?= =?UTF-8?q?=D0=BD=D1=82=2012.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D0=B7?= =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BC=D0=B0=D1=82?= =?UTF-8?q?=D1=80=D0=B8=D1=86=D1=8B=20=D0=BF=D0=BE=20=D1=81=D1=82=D0=BE?= =?UTF-8?q?=D0=BB=D0=B1=D1=86=D0=B0=D0=BC=20(#239)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../func_tests/main.cpp | 439 ++++++++++++++++++ .../include/ops_mpi.hpp | 47 ++ .../perf_tests/main.cpp | 103 ++++ .../src/ops_mpi.cpp | 161 +++++++ .../func_tests/main.cpp | 258 ++++++++++ .../include/ops_seq.hpp | 26 ++ .../perf_tests/main.cpp | 92 ++++ .../src/ops_seq.cpp | 51 ++ 8 files changed, 1177 insertions(+) create mode 100644 tasks/mpi/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/laganina_e_sum_values_by_columns_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp create mode 100644 tasks/seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/laganina_e_sum_values_by_columns_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..498efd76546 --- /dev/null +++ b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp @@ 
-0,0 +1,439 @@ +#include + +#include +#include +#include +#include + +#include "mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp" + +std::vector laganina_e_sum_values_by_columns_matrix_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = (gen() % 100) - 49; + } + return vec; +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, Test_2_2_matrix) { + boost::mpi::communicator world; + + std::vector in = {1, 2, 1, 2}; + int n = 2; + int m = 2; + std::vector empty_par(n, 0); + std::vector out = {2, 4}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(empty_par.data())); + taskDataPar->outputs_count.emplace_back(empty_par.size()); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector empty_seq(n, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty_seq.data())); + taskDataSeq->outputs_count.emplace_back(empty_seq.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(empty_par, empty_seq); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, Test_500_300_matrix) { + boost::mpi::communicator world; + + int n = 300; + int m = 500; + + // Create data + std::vector in = laganina_e_sum_values_by_columns_matrix_mpi::getRandomVector(n * m); + std::vector empty_par(n, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + + taskDataPar->outputs.emplace_back(reinterpret_cast(empty_par.data())); + taskDataPar->outputs_count.emplace_back(empty_par.size()); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector empty_seq(n, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + 
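
// [Editor's sketch, not part of this patch] getRandomVector above maps
// gen() % 100 - 49 onto [-49, 50], which carries a slight modulo bias because
// 2^32 is not a multiple of 100. <random>'s distribution types produce the
// same range without that bias; the bounds below mirror the original
// expression, and `random_vector` is a hypothetical helper name.

#include <random>
#include <vector>

std::vector<int> random_vector(int sz) {
  std::random_device dev;
  std::mt19937 gen(dev());
  std::uniform_int_distribution<int> dist(-49, 50);  // unbiased over the closed range
  std::vector<int> vec(sz);
  for (int& x : vec) x = dist(gen);
  return vec;
}
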
taskDataSeq->outputs.emplace_back(reinterpret_cast(empty_seq.data())); + taskDataSeq->outputs_count.emplace_back(empty_seq.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(empty_par, empty_seq); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, partest1) { + boost::mpi::communicator world; + + int n = 2; + int m = 2; + + // Create data + std::vector in = {1, 2, 1, 2}; + std::vector empty_par(n, 0); + std::vector out = {2, 4}; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(empty_par.data())); + taskDataPar->outputs_count.emplace_back(empty_par.size()); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector empty_seq(n, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty_seq.data())); + taskDataSeq->outputs_count.emplace_back(empty_seq.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(empty_par, empty_seq); + } +} +TEST(laganina_e_sum_values_by_columns_matrix_mpi, partest2) { + boost::mpi::communicator world; + + int n = 5000; + int m = 3000; + + // Create data + std::vector in(m * n, 1); + std::vector empty_par(n, 0); + std::vector out(n, m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(empty_par.data())); + taskDataPar->outputs_count.emplace_back(empty_par.size()); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector empty_seq(n, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + 
taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty_seq.data())); + taskDataSeq->outputs_count.emplace_back(empty_seq.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(empty_par, empty_seq); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, partest3) { + boost::mpi::communicator world; + + int n = 3000; + int m = 5000; + + // Create data + std::vector in(m * n, 1); + std::vector empty_par(n, 0); + std::vector out(n, m); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(empty_par.data())); + taskDataPar->outputs_count.emplace_back(empty_par.size()); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector empty_seq(n, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty_seq.data())); + taskDataSeq->outputs_count.emplace_back(empty_seq.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(empty_par, empty_seq); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, Test_14_13_20_19_13) { + int n = 5; + int m = 3; + boost::mpi::communicator world; + std::vector in; + std::vector out; + std::vector res_par(n); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = {2, 5, 6, 7, 4, 9, 4, 6, 7, 9, 3, 4, 8, 5, 0}; + out = {14, 13, 20, 19, 13}; + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(n); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(n); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + 
taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(n); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_par, out); + ASSERT_EQ(res_seq, out); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, Test_35_15_11_20_16_27) { + int n = 6; + int m = 3; + boost::mpi::communicator world; + std::vector in; + std::vector out; + std::vector res_par(n); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = {10, 7, 4, 8, 7, 9, 13, 4, 5, 7, 6, 9, 12, 4, 2, 5, 3, 9}; + out = {35, 15, 11, 20, 16, 27}; + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(n); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(n); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(n); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_par, out); + ASSERT_EQ(res_seq, out); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, Test_30_38_28_18_21) { + int n = 5; + int m = 4; + boost::mpi::communicator world; + std::vector in; + std::vector out; + std::vector res_par(n); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + in = {9, 5, 3, 9, 7, 9, 13, 4, 5, 7, 7, 9, 12, 4, 0, 5, 11, 9, 0, 7}; + out = {30, 38, 28, 18, 21}; + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(res_par.data())); + taskDataPar->outputs_count.emplace_back(n); + } + + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector res_seq(n); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_seq.data())); + taskDataSeq->outputs_count.emplace_back(n); + + // Create Task + laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(res_par, out); + ASSERT_EQ(res_seq, out); + } +} \ No newline at end of file diff --git a/tasks/mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..2c3a2ae0de4 --- /dev/null +++ b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace laganina_e_sum_values_by_columns_matrix_mpi { + +std::vector getRandomVector(int sz); +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res_; + int m{}; + int n{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector local_input_; + std::vector input_; + std::vector res_; + int m{}; + int n{}; + + boost::mpi::communicator world; +}; + +} // namespace laganina_e_sum_values_by_columns_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..02069d69c7d --- /dev/null +++ b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,103 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp" + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + + int n = 3000; + int m = 6000; + + // Create data + std::vector input(n * m, 0); + std::vector empty(n, 0); + std::vector out(n, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataPar->outputs_count.emplace_back(empty.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + 
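+ // Descriptive note (an assumption about the course perf harness, added for clarity):
+ // num_running is the repetition count the harness averages over, and current_timer is
+ // a callback returning elapsed seconds; below it is wired to boost::mpi::timer, whose
+ // elapsed() reports seconds since the timer was constructed.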
perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(empty, out); + } +} + +TEST(laganina_e_sum_values_by_columns_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + int n = 3000; + int m = 6000; + + // Create data + std::vector input(n * m, 0); + std::vector empty(n, 0); + std::vector out(n, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(input.data())); + taskDataPar->inputs_count.emplace_back(input.size()); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->inputs_count.emplace_back(n); + + taskDataPar->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataPar->outputs_count.emplace_back(empty.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(empty, out); + } +} \ No newline at end of file diff --git a/tasks/mpi/laganina_e_sum_values_by_columns_matrix/src/ops_mpi.cpp b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..ea96407b638 --- /dev/null +++ b/tasks/mpi/laganina_e_sum_values_by_columns_matrix/src/ops_mpi.cpp @@ -0,0 +1,161 @@ +#include "mpi/laganina_e_sum_values_by_columns_matrix/include/ops_mpi.hpp" + +#include +#include + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + m = taskData->inputs_count[1]; + n = taskData->inputs_count[2]; + auto* ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = ptr[i]; + } + res_ = std::vector(n, 0); + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + if (taskData->inputs_count[2] != taskData->outputs_count[0]) { + return false; + } + if (taskData->inputs_count[1] < 1 || taskData->inputs_count[2] < 1) { + return false; + } + if (taskData->inputs_count[0] != taskData->inputs_count[1] * taskData->inputs_count[2]) { + return false; + } + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (int j = 0; j < n; j++) { + int sum = 0; + for (int i = 0; i < m; i++) { + sum += input_[i * n + j]; + } + res_[j] = sum; + } + return true; +} + +bool 
laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < n; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res_[i]; + } + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + int size = 0; + unsigned int delta = 0; + + if (world.rank() == 0) { + m = taskData->inputs_count[1]; + n = taskData->inputs_count[2]; + size = n * m; + if (size % world.size() == 0) { + delta = size / world.size(); + } else { + delta = size / world.size() + 1; + } + input_ = std::vector(delta * world.size()); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < n; i++) { + for (int k = i * m, r = i; r < size; r += n, k++) { + input_[k] = tmp_ptr[r]; + } + } + } + + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if (taskData->inputs_count[2] != taskData->outputs_count[0]) { + return false; + }; + if (taskData->inputs_count[1] < 1 || taskData->inputs_count[2] < 1) { + return false; + } + if (taskData->inputs_count[0] != taskData->inputs_count[1] * taskData->inputs_count[2]) { + return false; + } + return true; + } + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + int size = 0; + unsigned int delta = 0; + + if (world.rank() == 0) { + n = taskData->inputs_count[1]; + m = taskData->inputs_count[2]; + size = n * m; + if (size % world.size() == 0) { + delta = size / world.size(); + } else { + delta = size / world.size() + 1; + } + } + + broadcast(world, m, 0); + broadcast(world, n, 0); + broadcast(world, delta, 0); + + local_input_ = std::vector(delta); + boost::mpi::scatter(world, input_.data(), local_input_.data(), delta, 0); + res_.resize(m); + unsigned int last = 0; + + if (world.rank() == world.size() - 1) { + last = local_input_.size() * world.size() - n * m; + } + unsigned int id = world.rank() * local_input_.size() / n; + + for (unsigned int i = 0; i < id; i++) { + reduce(world, 0, res_[i], std::plus(), 0); + } + + delta = std::min(local_input_.size(), n - world.rank() * local_input_.size() % n); + int l_res = std::accumulate(local_input_.begin(), local_input_.begin() + delta, 0); + reduce(world, l_res, res_[id], std::plus(), 0); + id++; + unsigned int k = 0; + + while (local_input_.begin() + delta + k * n < local_input_.end() - last) { + l_res = std::accumulate(local_input_.begin() + delta + k * n, + std::min(local_input_.end(), local_input_.begin() + delta + (k + 1) * n), 0); + reduce(world, l_res, res_[id], std::plus(), 0); + k++; + id++; + } + + for (unsigned int i = id; i < res_.size(); i++) { + reduce(world, 0, res_[i], std::plus(), 0); + } + + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < m; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res_[i]; + } + } + return true; +} diff --git a/tasks/seq/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp b/tasks/seq/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..4aee44b9e03 --- /dev/null +++ b/tasks/seq/laganina_e_sum_values_by_columns_matrix/func_tests/main.cpp @@ -0,0 +1,258 @@ + +#include + +#include +#include + +#include 
"seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp" + +std::vector laganina_e_sum_values_by_columns_matrix_seq::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = (gen() % 100) - 49; + } + return vec; +} + +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_2_2_matrix) { + int n = 2; + int m = 2; + + // Create data 555 + std::vector in = {1, 2, 1, 2}; + std::vector emp(m, 0); + std::vector out = {2, 4}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->inputs_count.emplace_back(m); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(emp.data())); + taskDataSeq->outputs_count.emplace_back(emp.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(emp, out); +} + +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_500_500_matrix) { + // Create data + + int n = 500; + int m = 500; + std::vector in(m * n, 0); + std::vector empty(n, 0); + std::vector out(n, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, empty); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_Rand_500_500_matrix) { + // Create data + + int n = 500; + int m = 500; + std::vector in = laganina_e_sum_values_by_columns_matrix_seq::getRandomVector(m * n); + std::vector empty(n, 0); + std::vector out(n, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_1000_1000_matrix) { + // Create data + + int n = 1000; + int m = 1000; + std::vector in(m * n, 0); + std::vector empty(n, 0); + std::vector out(n, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, empty); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_2000_2000_matrix) { + // Create data + + int n = 2000; + int m = 2000; + std::vector in(m * n, 0); + std::vector empty(n, 0); + std::vector out(n, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, empty); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_2_3_matrix) { + // Create data + + int n = 3; + int m = 2; + std::vector in(m * n, 1); + std::vector emp(n, 0); + std::vector out(n, m); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(emp.data())); + taskDataSeq->outputs_count.emplace_back(emp.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, emp); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_3_2_matrix) { + // Create data + + int n = 2; + int m = 3; + std::vector in(m * n, 1); + std::vector empty(n, 0); + std::vector out(n, m); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(out, empty); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_validation_output) { + // Create data + std::vector in = {1, 2, 1, 2}; + int n = 
2; + int m = 2; + std::vector empty(n, 0); + std::vector out = {2, 4}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(n - 1); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_NE(testTaskSequential.validation(), true); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_validation_empty) { + // Create data + std::vector in = {}; + int n = 0; + int m = 0; + std::vector empty = {}; + std::vector out = {}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(n); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_NE(testTaskSequential.validation(), true); +} +TEST(laganina_e_sum_values_by_columns_matrix_seq, Test_validation_rank) { + // Create data + std::vector in = {1, 2, 3}; + int n = 2; + int m = 2; + std::vector empty(n, 0); + std::vector out = {4, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(n); + + // Create Task + laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq testTaskSequential(taskDataSeq); + ASSERT_NE(testTaskSequential.validation(), true); +} diff --git a/tasks/seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp b/tasks/seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..d877df99077 --- /dev/null +++ b/tasks/seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace laganina_e_sum_values_by_columns_matrix_seq { + +std::vector getRandomVector(int sz); +class sum_values_by_columns_matrix_Seq : public ppc::core::Task { + public: + explicit sum_values_by_columns_matrix_Seq(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res_; + int m{}; + int n{}; +}; + +} // namespace laganina_e_sum_values_by_columns_matrix_seq \ No newline at end of file diff --git a/tasks/seq/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp b/tasks/seq/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..a25c66e52ad --- /dev/null +++ b/tasks/seq/laganina_e_sum_values_by_columns_matrix/perf_tests/main.cpp @@ -0,0 +1,92 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" 
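+// A minimal reference sketch of the kernel these perf tests time, added as a hypothetical
+// helper for illustration only; it is not part of the original patch and is not called by
+// the tests. For an m x n matrix flattened row-major, column j is the sum of
+// input[i * n + j] over all rows i, mirroring sum_values_by_columns_matrix_Seq::run().
+// It is pointer-based so it needs no extra includes. With the all-ones input used below,
+// columnSumReference(in.data(), m, n, j) == m for any j.
+[[maybe_unused]] static int columnSumReference(const int* input, int m, int n, int j) {
+  int sum = 0;
+  for (int i = 0; i < m; i++) {
+    sum += input[i * n + j];  // stride n steps down column j
+  }
+  return sum;
+}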
+#include "seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp" +TEST(laganina_e_sum_values_by_columns_matrix_seq, test_pipeline_run) { + int n = 5000; + int m = 5000; + + // Create data + std::vector in(n * m, 1); + std::vector empty(n, 0); + std::vector out(n, m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(empty, out); +} + +TEST(laganina_e_sum_values_by_columns_matrix_seq, test_task_run) { + int n = 5000; + int m = 5000; + + // Create data + std::vector in(n * m, 1); + std::vector empty(n, 0); + std::vector out(n, m); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(m); + taskDataSeq->inputs_count.emplace_back(n); + + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(empty.data())); + taskDataSeq->outputs_count.emplace_back(empty.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(empty, out); +} diff --git a/tasks/seq/laganina_e_sum_values_by_columns_matrix/src/ops_seq.cpp b/tasks/seq/laganina_e_sum_values_by_columns_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..5c0c5c3333f --- /dev/null +++ b/tasks/seq/laganina_e_sum_values_by_columns_matrix/src/ops_seq.cpp @@ -0,0 +1,51 @@ +#include "seq/laganina_e_sum_values_by_columns_matrix/include/ops_seq.hpp" + +#include +#include + +bool laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + m = 
taskData->inputs_count[1]; + n = taskData->inputs_count[2]; + auto* ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = ptr[i]; + } + res_ = std::vector(n, 0); + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq::validation() { + internal_order_test(); + if (taskData->inputs_count[2] != taskData->outputs_count[0]) { + return false; + }; + if (taskData->inputs_count[1] < 1 || taskData->inputs_count[2] < 1) { + return false; + } + if (taskData->inputs_count[0] != taskData->inputs_count[1] * taskData->inputs_count[2]) { + return false; + } + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq::run() { + internal_order_test(); + for (int j = 0; j < n; j++) { + int sum = 0; + for (int i = 0; i < m; i++) { + sum += input_[i * n + j]; + } + res_[j] = sum; + } + return true; +} + +bool laganina_e_sum_values_by_columns_matrix_seq::sum_values_by_columns_matrix_Seq::post_processing() { + internal_order_test(); + for (int i = 0; i < n; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res_[i]; + } + return true; +} From 6299377b5b8bc30736e8e210a2a1791206622a29 Mon Sep 17 00:00:00 2001 From: Anton <132440944+lolipolovkovith@users.noreply.github.com> Date: Fri, 8 Nov 2024 03:31:44 +0300 Subject: =?UTF-8?q?=D0=9C=D0=B0=D0=BB=D1=8B=D1=88=D0=B5?= =?UTF-8?q?=D0=B2=20=D0=90=D0=BD=D1=82=D0=BE=D0=BD.=20=D0=97=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD?= =?UTF-8?q?=D1=82=2011.=20=D0=A1=D1=83=D0=BC=D0=BC=D0=B0=20=D0=B7=D0=BD?= =?UTF-8?q?=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D0=B9=20=D0=BF=D0=BE=20=D1=81?= =?UTF-8?q?=D1=82=D1=80=D0=BE=D0=BA=D0=B0=D0=BC=20=D0=BC=D0=B0=D1=82=D1=80?= =?UTF-8?q?=D0=B8=D1=86=D1=8B=20(#201)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Seq: 1. The data is validated (the number of matrix rows must match the output size) 2. The data is copied row by row from `taskData` 3. The rows are summed with `std::accumulate` 4. The result is copied into `taskData` MPI: 1. The data of process zero is validated (the number of matrix rows must match the output size) 2. The data of process zero is copied row by row from `taskData` 3. The sizes of the chunks sent to each process are computed. The indivisible remainder is ~sent to the last process~ distributed evenly across the processes starting from the end. (upd by [0a6e48c](https://github.com/learning-process/ppc-2024-autumn/pull/201/commits/0a6e48c6a0e7fbd8e607299de1f17dae3d091645)) 4. The data is scattered with `scatterv` 5. The local rows are summed with `std::accumulate` 6. The local results are gathered with `gatherv` 7. The gathered results are copied into `taskData` --- .../func_tests/main.cpp | 278 ++++++++++++++++++ .../include/ops_mpi.hpp | 42 +++ .../perf_tests/main.cpp | 117 ++++++++ .../src/ops_mpi.cpp | 115 ++++++++ .../func_tests/main.cpp | 160 ++++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 114 +++++++ .../src/ops_seq.cpp | 47 +++ 8 files changed, 897 insertions(+) create mode 100644 tasks/mpi/malyshev_a_sum_rows_matrix/func_tests/main.cpp create mode 100644 tasks/mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp create mode 100644 tasks/mpi/malyshev_a_sum_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/mpi/malyshev_a_sum_rows_matrix/src/ops_mpi.cpp create mode 100644 tasks/seq/malyshev_a_sum_rows_matrix/func_tests/main.cpp create mode 100644 tasks/seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp create mode 100644 tasks/seq/malyshev_a_sum_rows_matrix/perf_tests/main.cpp create mode 100644 tasks/seq/malyshev_a_sum_rows_matrix/src/ops_seq.cpp diff --git a/tasks/mpi/malyshev_a_sum_rows_matrix/func_tests/main.cpp b/tasks/mpi/malyshev_a_sum_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..b7c15f8cdd1 --- /dev/null +++ b/tasks/mpi/malyshev_a_sum_rows_matrix/func_tests/main.cpp @@ -0,0 +1,278 @@ +#include + +#include +#include +#include +#include + +#include "mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp" + +namespace malyshev_a_sum_rows_matrix_test_function { + +std::vector> getRandomData(uint32_t rows, uint32_t cols) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> data(rows, std::vector(cols)); + + for (auto &row : data) { + for (auto &el : row) { + el = -200 + gen() % (300 + 200 + 1); + } + } + + return data; +} + +} // namespace malyshev_a_sum_rows_matrix_test_function + +TEST(malyshev_a_sum_rows_matrix_mpi, rectangular_matrix_stretched_horizontally_7x17) { + uint32_t rows = 7; + uint32_t cols = 17; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + ASSERT_TRUE(taskMPI.validation()); + ASSERT_TRUE(taskMPI.pre_processing()); + ASSERT_TRUE(taskMPI.run()); + ASSERT_TRUE(taskMPI.post_processing()); + + if (world.rank() == 0) { + std::vector seqSum(rows); + + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < mpiSum.size(); i++) { + ASSERT_EQ(seqSum[i], mpiSum[i]); + } + } +} + +TEST(malyshev_a_sum_rows_matrix_mpi, 
rectangular_matrix_stretched_verticaly_100x75) { + uint32_t rows = 100; + uint32_t cols = 75; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + ASSERT_TRUE(taskMPI.validation()); + ASSERT_TRUE(taskMPI.pre_processing()); + ASSERT_TRUE(taskMPI.run()); + ASSERT_TRUE(taskMPI.post_processing()); + + if (world.rank() == 0) { + std::vector seqSum(rows); + + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < mpiSum.size(); i++) { + ASSERT_EQ(seqSum[i], mpiSum[i]); + } + } +} + +TEST(malyshev_a_sum_rows_matrix_mpi, squere_matrix_100x100) { + uint32_t rows = 100; + uint32_t cols = 100; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + ASSERT_TRUE(taskMPI.validation()); + ASSERT_TRUE(taskMPI.pre_processing()); + ASSERT_TRUE(taskMPI.run()); + ASSERT_TRUE(taskMPI.post_processing()); + + if (world.rank() == 0) { + std::vector seqSum(rows); + + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < mpiSum.size(); i++) { + ASSERT_EQ(seqSum[i], mpiSum[i]); + } + } +} + +TEST(malyshev_a_sum_rows_matrix_mpi, matrix_1x1) { + uint32_t rows = 1; + uint32_t cols = 1; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr 
taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + ASSERT_TRUE(taskMPI.validation()); + ASSERT_TRUE(taskMPI.pre_processing()); + ASSERT_TRUE(taskMPI.run()); + ASSERT_TRUE(taskMPI.post_processing()); + + if (world.rank() == 0) { + std::vector seqSum(rows); + + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < mpiSum.size(); i++) { + ASSERT_EQ(seqSum[i], mpiSum[i]); + } + } +} + +TEST(malyshev_a_sum_rows_matrix_mpi, test_validation) { + uint32_t rows = 7; + uint32_t cols = 17; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(0); + + ASSERT_FALSE(taskMPI.validation()); + } +} \ No newline at end of file diff --git a/tasks/mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp b/tasks/mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..20cf03f331f --- /dev/null +++ b/tasks/mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace malyshev_a_sum_rows_matrix_mpi { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +class TestTaskParallel : public ppc::core::Task { + public: + explicit TestTaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_, local_input_; + std::vector res_, local_res_; + uint32_t delta_, ext_; + + boost::mpi::communicator world; +}; + +} // namespace malyshev_a_sum_rows_matrix_mpi \ No newline at end of 
file diff --git a/tasks/mpi/malyshev_a_sum_rows_matrix/perf_tests/main.cpp b/tasks/mpi/malyshev_a_sum_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..59215f1242d --- /dev/null +++ b/tasks/mpi/malyshev_a_sum_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,117 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp" + +namespace malyshev_a_sum_rows_matrix_test_function { + +std::vector> getRandomData(uint32_t rows, uint32_t cols) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> data(rows, std::vector(cols)); + + for (auto &row : data) { + for (auto &el : row) { + el = -200 + gen() % (300 + 200 + 1); + } + } + + return data; +} + +} // namespace malyshev_a_sum_rows_matrix_test_function + +TEST(malyshev_a_sum_rows_matrix_mpi, test_pipeline_run) { + uint32_t rows = 3000; + uint32_t cols = 3000; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + ASSERT_TRUE(testMpiTaskParallel->pre_processing()); + ASSERT_TRUE(testMpiTaskParallel->run()); + ASSERT_TRUE(testMpiTaskParallel->post_processing()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(malyshev_a_sum_rows_matrix_mpi, test_task_run) { + uint32_t rows = 3000; + uint32_t cols = 3000; + + boost::mpi::communicator world; + std::vector> randomData; + std::vector mpiSum; + + std::shared_ptr taskDataPar = std::make_shared(); + malyshev_a_sum_rows_matrix_mpi::TestTaskParallel taskMPI(taskDataPar); + + if (world.rank() == 0) { + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + mpiSum.resize(rows); + + for (auto &row : randomData) { + taskDataPar->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataPar->inputs_count.push_back(rows); + taskDataPar->inputs_count.push_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(mpiSum.data())); + taskDataPar->outputs_count.push_back(rows); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + ASSERT_TRUE(testMpiTaskParallel->pre_processing()); + ASSERT_TRUE(testMpiTaskParallel->run()); + ASSERT_TRUE(testMpiTaskParallel->post_processing()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto 
perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/mpi/malyshev_a_sum_rows_matrix/src/ops_mpi.cpp b/tasks/mpi/malyshev_a_sum_rows_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..1b09de9cc9d --- /dev/null +++ b/tasks/mpi/malyshev_a_sum_rows_matrix/src/ops_mpi.cpp @@ -0,0 +1,115 @@ +#include "mpi/malyshev_a_sum_rows_matrix/include/ops_mpi.hpp" + +#include +#include +#include + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskSequential ::pre_processing() { + internal_order_test(); + + uint32_t rows = taskData->inputs_count[0]; + uint32_t cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + res_.resize(rows); + + int32_t* data; + for (uint32_t i = 0; i < input_.size(); i++) { + data = reinterpret_cast(taskData->inputs[i]); + std::copy(data, data + cols, input_[i].data()); + } + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == taskData->inputs_count[0]; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskSequential ::run() { + internal_order_test(); + + for (uint32_t i = 0; i < input_.size(); i++) { + res_[i] = std::accumulate(input_[i].begin(), input_[i].end(), 0); + } + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskSequential ::post_processing() { + internal_order_test(); + + std::copy(res_.begin(), res_.end(), reinterpret_cast(taskData->outputs[0])); + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + uint32_t rows = taskData->inputs_count[0]; + uint32_t cols = taskData->inputs_count[1]; + + delta_ = rows / world.size(); + ext_ = rows % world.size(); + + input_.resize(rows, std::vector(cols)); + res_.resize(rows); + + int32_t* data; + for (uint32_t i = 0; i < input_.size(); i++) { + data = reinterpret_cast(taskData->inputs[i]); + std::copy(data, data + cols, input_[i].data()); + } + } + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskParallel::validation() { + internal_order_test(); + + if (world.rank() == 0) { + return taskData->outputs_count[0] == taskData->inputs_count[0]; + } + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskParallel::run() { + internal_order_test(); + + broadcast(world, delta_, 0); + broadcast(world, ext_, 0); + + std::vector sizes(world.size(), delta_); + for (uint32_t i = 0; i < ext_; i++) { + sizes[world.size() - i - 1]++; + } + + local_input_.resize(sizes[world.rank()]); + local_res_.resize(sizes[world.rank()]); + + scatterv(world, input_, sizes, local_input_.data(), 0); + + for (uint32_t i = 0; i < local_input_.size(); i++) { + local_res_[i] = std::accumulate(local_input_[i].begin(), local_input_[i].end(), 0); + } + + gatherv(world, local_res_, res_.data(), sizes, 0); + + return true; +} + +bool malyshev_a_sum_rows_matrix_mpi::TestTaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + std::copy(res_.begin(), res_.end(), reinterpret_cast(taskData->outputs[0])); + } + + return true; +} diff --git a/tasks/seq/malyshev_a_sum_rows_matrix/func_tests/main.cpp b/tasks/seq/malyshev_a_sum_rows_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..6215051adf8 --- /dev/null +++ 
b/tasks/seq/malyshev_a_sum_rows_matrix/func_tests/main.cpp @@ -0,0 +1,160 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp" + +namespace malyshev_a_sum_rows_matrix_test_function { + +std::vector> getRandomData(uint32_t rows, uint32_t cols) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> data(rows, std::vector(cols)); + + for (auto &row : data) { + for (auto &el : row) { + el = -200 + gen() % (300 + 200 + 1); + } + } + + return data; +} + +} // namespace malyshev_a_sum_rows_matrix_test_function + +TEST(malyshev_a_sum_rows_matrix_seq, rectangular_matrix_stretched_horizontally_7x17) { + uint32_t rows = 7; + uint32_t cols = 17; + + std::vector seqSum(rows); + std::vector> data(rows, std::vector(cols, 1)); + + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_seq::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : data) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < seqSum.size(); i++) { + ASSERT_EQ(seqSum[i], (int32_t)cols); + } +} + +TEST(malyshev_a_sum_rows_matrix_seq, rectangular_matrix_stretched_verticaly_100x75) { + uint32_t rows = 100; + uint32_t cols = 75; + + std::vector seqSum(rows); + std::vector> randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_seq::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < seqSum.size(); i++) { + ASSERT_EQ(seqSum[i], std::accumulate(randomData[i].begin(), randomData[i].end(), 0)); + } +} + +TEST(malyshev_a_sum_rows_matrix_seq, squere_matrix_100x100) { + uint32_t rows = 100; + uint32_t cols = 100; + + std::vector seqSum(rows); + std::vector> randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_seq::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < seqSum.size(); i++) { + ASSERT_EQ(seqSum[i], std::accumulate(randomData[i].begin(), randomData[i].end(), 0)); + } +} + 
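+// A minimal sketch of the block distribution described in the commit message and used by
+// the MPI variant's run(): every rank gets rows / procs rows, and the remainder
+// rows % procs is handed out one row per rank starting from the last rank, so chunk
+// sizes differ by at most one. Hypothetical helper added for illustration only; it is
+// not referenced by the tests in this file.
+[[maybe_unused]] static std::vector<uint32_t> rowDistribution(uint32_t rows, uint32_t procs) {
+  std::vector<uint32_t> sizes(procs, rows / procs);
+  for (uint32_t i = 0; i < rows % procs; i++) {
+    sizes[procs - 1 - i]++;  // e.g. rowDistribution(7, 3) yields {2, 2, 3}
+  }
+  return sizes;
+}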
+TEST(malyshev_a_sum_rows_matrix_seq, matrix_1x1) { + uint32_t rows = 1; + uint32_t cols = 1; + + std::vector seqSum(rows); + std::vector> randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_seq::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(seqSum.size()); + + ASSERT_TRUE(taskSeq.validation()); + ASSERT_TRUE(taskSeq.pre_processing()); + ASSERT_TRUE(taskSeq.run()); + ASSERT_TRUE(taskSeq.post_processing()); + + for (uint32_t i = 0; i < seqSum.size(); i++) { + ASSERT_EQ(seqSum[i], std::accumulate(randomData[i].begin(), randomData[i].end(), 0)); + } +} + +TEST(malyshev_a_sum_rows_matrix_seq, test_validation) { + uint32_t rows = 7; + uint32_t cols = 17; + + std::vector seqSum(rows); + std::vector> randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + std::shared_ptr taskDataSeq = std::make_shared(); + malyshev_a_sum_rows_matrix_seq::TestTaskSequential taskSeq(taskDataSeq); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(0); + + ASSERT_FALSE(taskSeq.validation()); +} \ No newline at end of file diff --git a/tasks/seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp b/tasks/seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..029e9deca10 --- /dev/null +++ b/tasks/seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace malyshev_a_sum_rows_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace malyshev_a_sum_rows_matrix_seq \ No newline at end of file diff --git a/tasks/seq/malyshev_a_sum_rows_matrix/perf_tests/main.cpp b/tasks/seq/malyshev_a_sum_rows_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..7e3b823f64c --- /dev/null +++ b/tasks/seq/malyshev_a_sum_rows_matrix/perf_tests/main.cpp @@ -0,0 +1,114 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp" + +namespace malyshev_a_sum_rows_matrix_test_function { + +std::vector> getRandomData(uint32_t rows, uint32_t cols) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector> data(rows, std::vector(cols)); + + for (auto &row : data) { + for (auto &el : row) { + el = -200 + gen() % (300 + 200 + 1); + } + } + + return data; +} + +} // namespace malyshev_a_sum_rows_matrix_test_function + +TEST(malyshev_a_sum_rows_matrix_seq, test_pipeline_run) { + uint32_t rows = 3000; + uint32_t cols = 3000; + + std::vector> randomData; + std::vector 
seqSum; + + std::shared_ptr taskDataSeq = std::make_shared(); + + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + seqSum.resize(rows); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(rows); + + auto taskSeq = std::make_shared(taskDataSeq); + + ASSERT_TRUE(taskSeq->validation()); + ASSERT_TRUE(taskSeq->pre_processing()); + ASSERT_TRUE(taskSeq->run()); + ASSERT_TRUE(taskSeq->post_processing()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskSeq); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(malyshev_a_sum_rows_matrix_seq, test_task_run) { + uint32_t rows = 3000; + uint32_t cols = 3000; + + std::vector> randomData; + std::vector seqSum; + + std::shared_ptr taskDataSeq = std::make_shared(); + + randomData = malyshev_a_sum_rows_matrix_test_function::getRandomData(rows, cols); + seqSum.resize(rows); + + for (auto &row : randomData) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.push_back(rows); + taskDataSeq->inputs_count.push_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(seqSum.data())); + taskDataSeq->outputs_count.push_back(rows); + + auto taskSeq = std::make_shared(taskDataSeq); + + ASSERT_TRUE(taskSeq->validation()); + ASSERT_TRUE(taskSeq->pre_processing()); + ASSERT_TRUE(taskSeq->run()); + ASSERT_TRUE(taskSeq->post_processing()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(taskSeq); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/seq/malyshev_a_sum_rows_matrix/src/ops_seq.cpp b/tasks/seq/malyshev_a_sum_rows_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..a3ea436373e --- /dev/null +++ b/tasks/seq/malyshev_a_sum_rows_matrix/src/ops_seq.cpp @@ -0,0 +1,47 @@ +#include "seq/malyshev_a_sum_rows_matrix/include/ops_seq.hpp" + +#include +#include +#include + +bool malyshev_a_sum_rows_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + uint32_t rows = taskData->inputs_count[0]; + uint32_t cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + res_.resize(rows); + + int32_t* data; + for (uint32_t i = 0; i < input_.size(); i++) { + data = reinterpret_cast(taskData->inputs[i]); + std::copy(data, data + cols, input_[i].data()); + } + + return true; +} + +bool 
malyshev_a_sum_rows_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return taskData->outputs_count[0] == taskData->inputs_count[0]; +} + +bool malyshev_a_sum_rows_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (uint32_t i = 0; i < input_.size(); i++) { + res_[i] = std::accumulate(input_[i].begin(), input_[i].end(), 0); + } + + return true; +} + +bool malyshev_a_sum_rows_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + std::copy(res_.begin(), res_.end(), reinterpret_cast(taskData->outputs[0])); + + return true; +} From 26d901a9573114a91e7990aa776aace63ab86250 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Fri, 8 Nov 2024 08:41:41 +0800 Subject: [PATCH 149/155] =?UTF-8?q?Revert=20"=D0=A2=D0=B0=D1=80=D0=B0?= =?UTF-8?q?=D0=BA=D0=B0=D0=BD=D0=BE=D0=B2=20=D0=94=D0=B5=D0=BD=D0=B8=D1=81?= =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?= =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=2020.=20=D0=98=D0=BD=D1=82=D0=B5?= =?UTF-8?q?=D0=B3=D1=80=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B5=20?= =?UTF-8?q?-=20=D0=BC=D0=B5=D1=82=D0=BE=D0=B4=20=D1=82=D1=80=D0=B0=D0=BF?= =?UTF-8?q?=D0=B5=D1=86=D0=B8=D0=B9."=20(#260)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reverts learning-process/ppc-2024-autumn#161 https://github.com/learning-process/ppc-2024-autumn/actions/runs/11733168860/job/32686882712 image --- .../func_tests/main.cpp | 194 ------------------ .../include/ops_mpi.hpp | 48 ----- .../perf_tests/main.cpp | 96 --------- .../src/ops_mpi.cpp | 109 ---------- .../func_tests/main.cpp | 66 ------ .../include/ops_seq.hpp | 25 --- .../perf_tests/main.cpp | 80 -------- .../src/ops_seq.cpp | 44 ---- 8 files changed, 662 deletions(-) delete mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp delete mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp delete mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp delete mode 100644 tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp delete mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp delete mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp delete mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp delete mode 100644 tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp deleted file mode 100644 index 2b8f93a7ed7..00000000000 --- a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2024 Tarakanov Denis -#include - -#include -#include -#include -#include - -#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp" - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration1) { - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - std::shared_ptr taskDataPar = std::make_shared(); - double a = 0.0; - double b = 1.0; - double h = 1e-8; - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - 
taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); - - ASSERT_EQ(parallelTask.validation(), true); - parallelTask.pre_processing(); - parallelTask.run(); - parallelTask.post_processing(); - - if (world.rank() == 0) { - std::vector reference_res(1, 0.0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); - taskDataSeq->inputs_count.emplace_back(3); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); - taskDataSeq->outputs_count.emplace_back(1); - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); - ASSERT_EQ(sequentialTask.validation(), true); - sequentialTask.pre_processing(); - sequentialTask.run(); - sequentialTask.post_processing(); - - ASSERT_NEAR(reference_res[0], global_res[0], 0.1); - } -} - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration2) { - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - std::shared_ptr taskDataPar = std::make_shared(); - double a = 5.0; - double b = 7.0; - double h = 1e-8; - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); - - ASSERT_EQ(parallelTask.validation(), true); - parallelTask.pre_processing(); - parallelTask.run(); - parallelTask.post_processing(); - - if (world.rank() == 0) { - std::vector reference_res(1, 0.0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); - taskDataSeq->inputs_count.emplace_back(3); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); - taskDataSeq->outputs_count.emplace_back(1); - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); - ASSERT_EQ(sequentialTask.validation(), true); - sequentialTask.pre_processing(); - sequentialTask.run(); - sequentialTask.post_processing(); - - ASSERT_NEAR(reference_res[0], global_res[0], 0.1); - } -} - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration3) { - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - std::shared_ptr taskDataPar = std::make_shared(); - double a = -2.0; - double b = -1.0; - double h = 1e-8; - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - 
tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); - - ASSERT_EQ(parallelTask.validation(), true); - parallelTask.pre_processing(); - parallelTask.run(); - parallelTask.post_processing(); - - if (world.rank() == 0) { - std::vector reference_res(1, 0.0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); - taskDataSeq->inputs_count.emplace_back(3); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); - taskDataSeq->outputs_count.emplace_back(1); - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); - ASSERT_EQ(sequentialTask.validation(), true); - sequentialTask.pre_processing(); - sequentialTask.run(); - sequentialTask.post_processing(); - - ASSERT_NEAR(reference_res[0], global_res[0], 0.1); - } -} - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_func_tests, Test_Integration_random_data) { - std::srand(static_cast(std::time(nullptr))); - - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - std::shared_ptr taskDataPar = std::make_shared(); - double a = std::rand() % (100); - double b = a + std::rand() % (5); - double h = 1e-8; - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par parallelTask(taskDataPar); - - ASSERT_EQ(parallelTask.validation(), true); - parallelTask.pre_processing(); - parallelTask.run(); - parallelTask.post_processing(); - - if (world.rank() == 0) { - std::vector reference_res(1, 0.0); - - std::shared_ptr taskDataSeq = std::make_shared(); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); - taskDataSeq->inputs.emplace_back(reinterpret_cast(&h)); - taskDataSeq->inputs_count.emplace_back(3); - taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); - taskDataSeq->outputs_count.emplace_back(1); - - tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq sequentialTask(taskDataSeq); - ASSERT_EQ(sequentialTask.validation(), true); - sequentialTask.pre_processing(); - sequentialTask.run(); - sequentialTask.post_processing(); - - ASSERT_NEAR(reference_res[0], global_res[0], 0.1); - } -} \ No newline at end of file diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp deleted file mode 100644 index 3ecfc7e0c44..00000000000 --- a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 Tarakanov Denis -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "core/task/include/task.hpp" - -namespace tarakanov_d_integration_the_trapezoid_method_mpi { - -class integration_the_trapezoid_method_seq : public ppc::core::Task { - public: - explicit integration_the_trapezoid_method_seq(std::shared_ptr taskData_) - : 
Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - double a{}, b{}, h{}, res{}; - - static double f(double x) { return x * x; }; -}; - -class integration_the_trapezoid_method_par : public ppc::core::Task { - public: - explicit integration_the_trapezoid_method_par(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - double res{}; - - double a{}, b{}, h{}, local_a{}; - uint32_t partsCount{}, localPartsCount{}; - - static double f(double x) { return x * x; }; - - boost::mpi::communicator world; -}; - -} // namespace tarakanov_d_integration_the_trapezoid_method_mpi \ No newline at end of file diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp deleted file mode 100644 index a8b29e4a3ba..00000000000 --- a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2024 Tarakanov Denis -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp" - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_perf_tests, test_pipeline_run) { - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - double a = 0.0; - double b = 1.0; - double h = 1e-8; - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - auto parallelTask = - std::make_shared( - taskDataPar); - - ASSERT_EQ(parallelTask->validation(), true); - parallelTask->pre_processing(); - parallelTask->run(); - parallelTask->post_processing(); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(parallelTask); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - double expected_value = 0.335; - ASSERT_NEAR(expected_value, global_res[0], 0.1); - } -} - -TEST(tarakanov_d_integration_the_trapezoid_method_mpi_perf_tests, test_task_run) { - boost::mpi::communicator world; - std::vector global_res(1, 0.0); - - double a = 0.0; - double b = 1.0; - double h = 1e-8; - std::shared_ptr taskDataPar = std::make_shared(); - - if (world.rank() == 0) { - taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); - taskDataPar->inputs.emplace_back(reinterpret_cast(&h)); - taskDataPar->inputs_count.emplace_back(3); - taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); - taskDataPar->outputs_count.emplace_back(1); - } - - auto parallelTask = - std::make_shared( - taskDataPar); - - ASSERT_EQ(parallelTask->validation(), true); - parallelTask->pre_processing(); - parallelTask->run(); - parallelTask->post_processing(); - - 
auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const boost::mpi::timer current_timer; - perfAttr->current_timer = [&] { return current_timer.elapsed(); }; - - auto perfResults = std::make_shared(); - - auto perfAnalyzer = std::make_shared(parallelTask); - perfAnalyzer->task_run(perfAttr, perfResults); - - if (world.rank() == 0) { - ppc::core::Perf::print_perf_statistic(perfResults); - double expected_value = 0.335; - ASSERT_NEAR(expected_value, global_res[0], 0.1); - } -} \ No newline at end of file diff --git a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp b/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp deleted file mode 100644 index 707abc36295..00000000000 --- a/tasks/mpi/tarakanov_d_integration_the_trapezoid_method/src/ops_mpi.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2024 Tarakanov Denis -#include "mpi/tarakanov_d_integration_the_trapezoid_method/include/ops_mpi.hpp" - -#include - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::pre_processing() { - internal_order_test(); - - // Init value for input and output - a = *reinterpret_cast(taskData->inputs[0]); - b = *reinterpret_cast(taskData->inputs[1]); - h = *reinterpret_cast(taskData->inputs[2]); - res = 0; - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::validation() { - internal_order_test(); - - // Check count elements of output - return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::run() { - internal_order_test(); - - int n = static_cast((b - a) / h); - double integral = 0.0; - - // summing trapezoid areas - for (int i = 1; i < n; ++i) { - double x0 = a + i * h; // left trapezoid edge - double x1 = a + (i + 1) * h; // right trapezoid edge - integral += 0.5 * (f(x0) + f(x1)); // trapezoid area - } - integral *= h; - - res = integral; - - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_seq::post_processing() { - internal_order_test(); - - *reinterpret_cast(taskData->outputs[0]) = res; - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::pre_processing() { - internal_order_test(); - - // Init value for input and output - if (world.rank() == 0) { - a = *reinterpret_cast(taskData->inputs[0]); - b = *reinterpret_cast(taskData->inputs[1]); - h = *reinterpret_cast(taskData->inputs[2]); - res = 0; - } - - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::validation() { - internal_order_test(); - // Check count elements of output - if (world.rank() == 0) { - uint32_t tmp1 = taskData->inputs_count[0]; - uint32_t tmp2 = taskData->outputs_count[0]; - return tmp1 == 3 && tmp2 == 1; - } - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::run() { - internal_order_test(); - - boost::mpi::broadcast(world, a, 0); - boost::mpi::broadcast(world, b, 0); - boost::mpi::broadcast(world, h, 0); - - partsCount = (b - a) / h; - localPartsCount = partsCount / world.size(); - localPartsCount = world.rank() < static_cast(partsCount) % world.size() ? 
localPartsCount + 1 : localPartsCount; - - local_a = a + world.rank() * localPartsCount * h; - - double local_res = 0.0; - local_res += (f(local_a) + f(local_a + localPartsCount * h)) * 0.5; - - for (unsigned int i = 0; i < localPartsCount; ++i) { - double x0 = local_a + i * h; // left trapezoid edge - double x1 = local_a + (i + 1) * h; // right trapezoid edge - local_res += 0.5 * (f(x0) + f(x1)); // trapezoid area - } - local_res *= h; - - boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); - - return true; -} - -bool tarakanov_d_integration_the_trapezoid_method_mpi::integration_the_trapezoid_method_par::post_processing() { - internal_order_test(); - if (world.rank() == 0) { - *reinterpret_cast(taskData->outputs[0]) = res; - } - return true; -} \ No newline at end of file diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp deleted file mode 100644 index 9c19c2b1e6a..00000000000 --- a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/func_tests/main.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include - -#include - -#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp" - -using namespace tarakanov_d_integration_the_trapezoid_method_seq; - -auto createTaskData(double* a, double* b, double* h, double* res) { - auto data = std::make_shared(); - - data->inputs.push_back(reinterpret_cast(a)); - data->inputs.push_back(reinterpret_cast(b)); - data->inputs.push_back(reinterpret_cast(h)); - - data->inputs_count.push_back(3); - - data->outputs.push_back(reinterpret_cast(res)); - data->outputs_count.push_back(1); - - return data; -} - -TEST(tarakanov_d_integration_the_trapezoid_method_func_test, ValidationWorks) { - double a = 0.0; - double b = 1.0; - double h = 0.1; - double res = 0.0; - auto data = createTaskData(&a, &b, &h, &res); - - integration_the_trapezoid_method task(data); - - EXPECT_TRUE(task.validation()); -} - -TEST(tarakanov_d_integration_the_trapezoid_method_func_test, PreProcessingWorks) { - double a = 0.0; - double b = 1.0; - double h = 0.1; - double res = 0.0; - auto data = createTaskData(&a, &b, &h, &res); - integration_the_trapezoid_method task(data); - - EXPECT_TRUE(task.validation()); - EXPECT_TRUE(task.pre_processing()); - EXPECT_EQ(task.get_data()->inputs_count[0], 3.0); - EXPECT_EQ(task.get_data()->outputs_count[0], 1.0); -} - -TEST(tarakanov_d_integration_the_trapezoid_method_func_test, PostProcessingWorks) { - double a = 0.0; - double b = 1.0; - double h = 0.1; - double res = 0.0; - auto data = createTaskData(&a, &b, &h, &res); - - integration_the_trapezoid_method task(data); - EXPECT_TRUE(task.validation()); - EXPECT_TRUE(task.pre_processing()); - EXPECT_TRUE(task.run()); - EXPECT_TRUE(task.post_processing()); - - double output = *reinterpret_cast(data->outputs[0]); - bool flag = output == 0.0; - EXPECT_FALSE(flag); -} diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp deleted file mode 100644 index 465050c2b05..00000000000 --- a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2024 Tarakanov Denis -#pragma once - -#include - -#include "core/task/include/task.hpp" - -namespace tarakanov_d_integration_the_trapezoid_method_seq { - -class integration_the_trapezoid_method : public ppc::core::Task { - public: - explicit 
integration_the_trapezoid_method(std::shared_ptr taskData_) - : Task(std::move(taskData_)) {} - bool pre_processing() override; - bool validation() override; - bool run() override; - bool post_processing() override; - - private: - double a{}, b{}, h{}, res{}; - - static double f(double x) { return x * x; }; -}; - -} // namespace tarakanov_d_integration_the_trapezoid_method_seq \ No newline at end of file diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp deleted file mode 100644 index d0c641be753..00000000000 --- a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/perf_tests/main.cpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2023 Tarakanov Denis -#include - -#include -#include - -#include "core/perf/include/perf.hpp" -#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp" - -using namespace tarakanov_d_integration_the_trapezoid_method_seq; - -TEST(trapezoid_method_perf_test, test_pipeline_run) { - double a = 0.0; - double b = 1.0; - double h = 0.1; - - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&h)); - taskData->inputs_count.push_back(3); - - double out = 0.0; - taskData->outputs.push_back(reinterpret_cast(&out)); - taskData->outputs_count.push_back(1); - - auto task = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(task); - perfAnalyzer->pipeline_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - double expected_result = 0.335; - EXPECT_DOUBLE_EQ(out, expected_result); -} - -TEST(trapezoid_method_perf_test, test_task_run) { - double a = 0.0; - double b = 1.0; - double h = 0.1; - - auto taskData = std::make_shared(); - taskData->inputs.push_back(reinterpret_cast(&a)); - taskData->inputs.push_back(reinterpret_cast(&b)); - taskData->inputs.push_back(reinterpret_cast(&h)); - taskData->inputs_count.push_back(3); - - double out = 0.0; - taskData->outputs.push_back(reinterpret_cast(&out)); - taskData->outputs_count.push_back(1); - - auto task = std::make_shared(taskData); - - auto perfAttr = std::make_shared(); - perfAttr->num_running = 10; - const auto t0 = std::chrono::high_resolution_clock::now(); - perfAttr->current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - - auto perfResults = std::make_shared(); - auto perfAnalyzer = std::make_shared(task); - perfAnalyzer->task_run(perfAttr, perfResults); - ppc::core::Perf::print_perf_statistic(perfResults); - - double expected_result = 0.335; - EXPECT_DOUBLE_EQ(out, expected_result); -} \ No newline at end of file diff --git a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp b/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp deleted file mode 100644 index 338db148881..00000000000 --- 
a/tasks/seq/tarakanov_d_integration_the_trapezoid_method/src/ops_seq.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2024 Tarakanov Denis
-#include "seq/tarakanov_d_integration_the_trapezoid_method/include/ops_seq.hpp"
-
-bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::pre_processing() {
-  internal_order_test();
-
-  // Init value for input and output
-  a = *reinterpret_cast(taskData->inputs[0]);
-  b = *reinterpret_cast(taskData->inputs[1]);
-  h = *reinterpret_cast(taskData->inputs[2]);
-  res = 0;
-  return true;
-}
-
-bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::validation() {
-  internal_order_test();
-
-  // Check count elements of output
-  return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1;
-}
-
-bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::run() {
-  internal_order_test();
-
-  int n = static_cast((b - a) / h);
-  double integral = 0.0;
-
-  // summing trapezoid areas
-  for (int i = 0; i < n; ++i) {
-    double x0 = a + i * h;  // left trapezoid edge
-    double x1 = a + (i + 1) * h;  // right trapezoid edge
-    integral += 0.5 * (x0 * x0 + x1 * x1) * h;  // trapezoid area
-  }
-
-  res = integral;
-
-  return true;
-}
-
-bool tarakanov_d_integration_the_trapezoid_method_seq::integration_the_trapezoid_method::post_processing() {
-  internal_order_test();
-  *reinterpret_cast(taskData->outputs[0]) = res;
-  return true;
-}

From dc87556b86d0b3abd0fee16f31535245fd32abbf Mon Sep 17 00:00:00 2001
From: Denis Zaytsev <131779885+DZaytsev4@users.noreply.github.com>
Date: Fri, 8 Nov 2024 18:12:23 +0300
Subject: [PATCH 150/155] =?UTF-8?q?=D0=97=D0=B0=D0=B9=D1=86=D0=B5=D0=B2=20?=
 =?UTF-8?q?=D0=94=D0=B5=D0=BD=D0=B8=D1=81.=20=D0=97=D0=B0=D0=B4=D0=B0?=
 =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?=
 =?UTF-8?q?=205.=20=D0=9D=D0=B0=D1=85=D0=BE=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8?=
 =?UTF-8?q?=D0=B5=20=D1=87=D0=B8=D1=81=D0=BB=D0=B0=20=D1=87=D0=B5=D1=80?=
 =?UTF-8?q?=D0=B5=D0=B4=D0=BE=D0=B2=D0=B0=D0=BD=D0=B8=D0=B9=20=D0=B7=D0=BD?=
 =?UTF-8?q?=D0=B0=D0=BA=D0=BE=D0=B2=20=D0=B7=D0=BD=D0=B0=D1=87=D0=B5=D0=BD?=
 =?UTF-8?q?=D0=B8=D0=B9=20=D1=81=D0=BE=D1=81=D0=B5=D0=B4=D0=BD=D0=B8=D1=85?=
 =?UTF-8?q?=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20?=
 =?UTF-8?q?=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0=20(#199)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Sequential execution**
1. Create the vector.
2. Walk through the elements, comparing each element's sign with the previous one; if the signs differ, add 1 to the result.
3. Output the result.

**Parallel execution**
1. Create the vector and split it into chunks; if the number of elements is not evenly divisible by the number of processes, the remaining elements go to the last process.
2. Each process sequentially scans its local vector and the boundaries between neighbouring chunks.
3. The local results are summed and the overall total is output.
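To make the chunked counting scheme above concrete, here is a minimal, MPI-free sketch of the same logic in plain C++. It is an illustration only: `count_alternations_chunked` and `signs_differ` are hypothetical names invented for this note, the chunk loop stands in for the point-to-point sends and the final reduce(+) of the actual task, and zero is treated as non-negative to match the comparisons in the task's `run()` methods.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// true when a and b lie on different sides of zero (zero counts as non-negative)
static bool signs_differ(int a, int b) { return (a >= 0) != (b >= 0); }

// Count sign alternations by splitting v into num_procs chunks, scanning each
// chunk locally, then adding one comparison per boundary between chunks.
int count_alternations_chunked(const std::vector<int>& v, int num_procs) {
  if (v.size() < 2 || num_procs < 1) return 0;
  const std::size_t delta = v.size() / num_procs;  // base chunk size
  const std::size_t rem = v.size() % num_procs;    // remainder goes to the last chunk
  int total = 0;
  std::size_t begin = 0;
  for (int p = 0; p < num_procs; ++p) {
    const std::size_t len = delta + (p == num_procs - 1 ? rem : 0);
    for (std::size_t i = begin + 1; i < begin + len; ++i) {  // local scan of one chunk
      if (signs_differ(v[i - 1], v[i])) ++total;
    }
    // boundary pair: last element of the previous chunk vs. first of this one
    if (begin > 0 && len > 0 && signs_differ(v[begin - 1], v[begin])) ++total;
    begin += len;
  }
  return total;  // the MPI version obtains this sum via a reduce over local counts
}

int main() {
  const std::vector<int> v = {1, -1, 1, -1, 1, -1};
  std::cout << count_alternations_chunked(v, 3) << '\n';  // prints 5
}
```

Each interior chunk boundary contributes exactly one extra comparison (the last element of one chunk against the first element of the next), which is why the chunked total agrees with a plain sequential scan for any split of the vector.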
--- .../func_tests/main.cpp | 324 ++++++++++++++++++ .../include/ops_mpi.hpp | 48 +++ .../perf_tests/main.cpp | 95 +++++ .../src/ops_mpi.cpp | 131 +++++++ .../func_tests/main.cpp | 234 +++++++++++++ .../include/ops_seq.hpp | 24 ++ .../perf_tests/main.cpp | 75 ++++ .../src/ops_seq.cpp | 38 ++ 8 files changed, 969 insertions(+) create mode 100644 tasks/mpi/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp create mode 100644 tasks/mpi/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/mpi/zaytsev_d_num_of_alternations_signs/src/ops_mpi.cpp create mode 100644 tasks/seq/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp create mode 100644 tasks/seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp create mode 100644 tasks/seq/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp create mode 100644 tasks/seq/zaytsev_d_num_of_alternations_signs/src/ops_seq.cpp diff --git a/tasks/mpi/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..220715f24d9 --- /dev/null +++ b/tasks/mpi/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,324 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include + +#include "mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp" + +static std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100 - 50; + } + return vec; +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, AllPositive) { + boost::mpi::communicator world; + std::vector test_vector = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, AllNegative) { + boost::mpi::communicator world; + std::vector test_vector = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + 
taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, AlternatingPositiveNegative) { + boost::mpi::communicator world; + std::vector test_vector = {1, -1, 1, -1, 1, -1, 1, -1, 1, -1}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, WithZeros) { + boost::mpi::communicator world; + std::vector test_vector = {1, 0, -1, 0, 1, 0, -1, 0}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, BigVector) { + boost::mpi::communicator world; + std::vector test_vector = {1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, SmallVector) { + boost::mpi::communicator world; + std::vector test_vector = {1, -1}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, EmptyVector) { + boost::mpi::communicator world; + std::vector test_vector = {}; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataPar->inputs_count.emplace_back(test_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(test_vector.data())); + taskDataSeq->inputs_count.emplace_back(test_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(global_count[0], reference_count[0]); + } +} + +TEST(zaytsev_d_num_of_alternations_signs_mpi, WithRandomVector) { + boost::mpi::communicator world; + int vector_size = 100; + std::vector random_vector = getRandomVector(vector_size); + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(random_vector.data())); + taskDataPar->inputs_count.emplace_back(random_vector.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(random_vector.data())); + taskDataSeq->inputs_count.emplace_back(random_vector.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + 
ASSERT_EQ(global_count[0], reference_count[0]); + } +} diff --git a/tasks/mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..e9dca6509e2 --- /dev/null +++ b/tasks/mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace zaytsev_d_num_of_alternations_signs_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops = "+"; + std::vector data_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops = "+"; + boost::mpi::communicator world; +}; + +} // namespace zaytsev_d_num_of_alternations_signs_mpi \ No newline at end of file diff --git a/tasks/mpi/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..5b14178e0ce --- /dev/null +++ b/tasks/mpi/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,95 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp" + +static std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100 - 50; + } + return vec; +} + +TEST(mpi_zaytsev_d_num_of_alternations_signs_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_GE(global_result[0], 0); + } +} + +TEST(mpi_zaytsev_d_num_of_alternations_signs_perf_test, 
test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_GE(global_result[0], 0); + } +} \ No newline at end of file diff --git a/tasks/mpi/zaytsev_d_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/zaytsev_d_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..c5fd834bcd8 --- /dev/null +++ b/tasks/mpi/zaytsev_d_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1,131 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/zaytsev_d_num_of_alternations_signs/include/ops_mpi.hpp" + +#include +#include +#include + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + int* input_data = reinterpret_cast(taskData->inputs[0]); + int input_count = taskData->inputs_count[0]; + data_.assign(input_data, input_data + input_count); + res = 0; + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential::run() { + internal_order_test(); + if (data_.size() < 2) { + res = 0; + return true; + } + + for (size_t i = 1; i < data_.size(); ++i) { + if ((data_[i] >= 0 && data_[i - 1] < 0) || (data_[i] < 0 && data_[i - 1] >= 0)) { + res++; + } + } + + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + } + + res = 0; + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + unsigned int remainder = 0; + + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % 
world.size(); + } + + broadcast(world, delta, 0); + broadcast(world, remainder, 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + int send_count = delta + (proc == world.size() - 1 ? remainder : 0); + world.send(proc, 0, input_.data() + proc * (int)delta, send_count); + } + } + + int local_size = (int)delta + (world.rank() == world.size() - 1 ? remainder : 0); + local_input_ = std::vector(local_size); + + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + local_size); + } else { + world.recv(0, 0, local_input_.data(), local_size); + } + + int local_count = 0; + + if (local_input_.size() > 1) { + for (size_t i = 1; i < local_input_.size(); i++) { + if ((local_input_[i - 1] >= 0 && local_input_[i] < 0) || (local_input_[i - 1] < 0 && local_input_[i] >= 0)) { + local_count++; + } + } + } + + int prev_value = 0; + if (world.rank() > 0) { + world.recv(world.rank() - 1, 0, &prev_value, 1); + if (!local_input_.empty() && + ((prev_value >= 0 && local_input_[0] < 0) || (prev_value < 0 && local_input_[0] >= 0))) { + local_count++; + } + } + + int last_value = local_input_.empty() ? 0 : local_input_.back(); + if (world.rank() < world.size() - 1) { + world.send(world.rank() + 1, 0, &last_value, 1); + } + + boost::mpi::reduce(world, local_count, res, std::plus<>(), 0); + return true; +} + +bool zaytsev_d_num_of_alternations_signs_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + + return true; +} \ No newline at end of file diff --git a/tasks/seq/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..9e50cf658db --- /dev/null +++ b/tasks/seq/zaytsev_d_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,234 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp" + +TEST(zaytsev_d_num_of_alternations_signs_seq, OnePositive) { + const int count = 1; + + std::vector in = {5}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 0); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, TwoOppositeSigns) { + const int count = 2; + + std::vector in = {5, -3}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], 1); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, AlternatingSigns) { + const int count = 5; + const int expected_result = 4; + + std::vector in = {5, -3, 8, -1, 4}; 
+ std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, AllPositive) { + const int count = 5; + const int expected_result = 0; + + std::vector in = {5, 3, 8, 1, 4}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, TwoSameValues) { + const int count = 2; + const int expected_result = 0; + + std::vector in = {5, 5}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, WithZero) { + const int count = 5; + const int expected_result = 3; + + std::vector in = {5, 0, -3, 8, -1}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, LongAlternatingSigns) { + const int count = 10; + const int expected_result = 9; + + std::vector in = {5, -4, 3, -2, 1, -1, 2, -3, 4, -5}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, ManyZeros) { + const 
int count = 6; + const int expected_result = 0; + + std::vector in = {0, 0, 0, 0, 0, 0}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, AllNegative) { + const int count = 4; + const int expected_result = 0; + + std::vector in = {-2, -3, -4, -5}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} + +TEST(zaytsev_d_num_of_alternations_signs_seq, RandomOrderWithAlternations) { + const int count = 8; + const int expected_result = 5; + + std::vector in = {1, -2, 0, 3, -4, 0, 5, -6}; + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential testTask(taskDataSeq); + + ASSERT_EQ(testTask.validation(), true); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(out[0], expected_result); +} \ No newline at end of file diff --git a/tasks/seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp new file mode 100644 index 00000000000..beab2fe0464 --- /dev/null +++ b/tasks/seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace zaytsev_d_num_of_alternations_signs_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; + std::vector data_; +}; + +} // namespace zaytsev_d_num_of_alternations_signs_seq \ No newline at end of file diff --git a/tasks/seq/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..b61f15a109b --- /dev/null +++ b/tasks/seq/zaytsev_d_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,75 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp" + 
+TEST(sequential_zaytsev_d_num_of_alternations_signs_perf_test, test_pipeline_run) { + const int input_size = 10000000; + + std::vector in(input_size); + for (int i = 0; i < input_size; ++i) { + in[i] = (i % 2 == 0) ? 1 : -1; + } + std::vector out(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((input_size - 1), out[0]); +} + +TEST(sequential_zaytsev_d_num_of_alternations_signs_perf_test, test_task_run) { + const int input_size = 10000000; + + std::vector in(input_size); + for (int i = 0; i < input_size; ++i) { + in[i] = (i % 2 == 0) ? 1 : -1; + } + std::vector out(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ((input_size - 1), out[0]); +} diff --git a/tasks/seq/zaytsev_d_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/zaytsev_d_num_of_alternations_signs/src/ops_seq.cpp new file mode 100644 index 00000000000..761f2300a31 --- /dev/null +++ b/tasks/seq/zaytsev_d_num_of_alternations_signs/src/ops_seq.cpp @@ -0,0 +1,38 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/zaytsev_d_num_of_alternations_signs/include/ops_seq.hpp" + +bool zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + int* input_data = reinterpret_cast(taskData->inputs[0]); + int input_count = taskData->inputs_count[0]; + data_.assign(input_data, input_data + input_count); + res = 0; + return true; +} + +bool zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential::run() { + internal_order_test(); + if (data_.size() < 2) { + res = 0; + return true; + } + + for (size_t i = 1; i < data_.size(); ++i) { + if ((data_[i] >= 0 && data_[i - 1] < 0) || (data_[i] < 0 && data_[i - 1] 
+      res++;
+    }
+  }
+
+  return true;
+}
+
+bool zaytsev_d_num_of_alternations_signs_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}

From eb50e318d35615856cd2cd4a6260b38cee620f07 Mon Sep 17 00:00:00 2001
From: TayaGordeeva <121258487+TayaGordeeva@users.noreply.github.com>
Date: Sat, 9 Nov 2024 06:02:10 +0300
Subject: Revert "Gordeeva Taisiya. Task 1. Variant 16. Finding the maximum
 values in the columns of a matrix" (#193)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reverts learning-process/ppc-2024-autumn#180

The problem was that one of the tests took more than one second to run; it behaved that way because of the large matrix size. The error has been fixed.

**Program description.**

The program computes the maximum value in every column of a matrix.

_-Sequential task:_
initially the maximum equals the first element of the current column; the current maximum is compared with every element of that column, and if an element is larger, it becomes the new maximum;
result: a vector with the maximum value of each column

_-MPI task:_
the matrix is partitioned across the processes;
each process finds the per-column maxima of its own data;
the per-process results are sent to the root process and merged;
result: a vector with the maximum value of each column

---------

Co-authored-by: TayaGordeeva
---
 .../func_tests/main.cpp | 210 ++++++++++++++++++
 .../include/ops_mpi.hpp |  46 ++++
 .../perf_tests/main.cpp |  81 +++++++
 .../src/ops_mpi.cpp     | 188 ++++++++++++++++
 .../func_tests/main.cpp | 149 +++++++++++++
 .../include/ops_seq.hpp |  25 +++
 .../perf_tests/main.cpp |  79 +++++++
 .../src/ops_seq.cpp     |  80 +++++++
 8 files changed, 858 insertions(+)
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..cd5ec3ec2d7
--- /dev/null
+++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp
@@ -0,0 +1,210 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp"
+
+TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyInput) {
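+  // nothing is attached to taskDataPar here, so validation() on rank 0 must return false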
boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + ASSERT_FALSE(testMpiTaskParallel.validation()); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, IsEmptyOutput) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(5); + taskDataPar->inputs_count.push_back(5); + taskDataPar->inputs.push_back(reinterpret_cast(new int[25])); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_columns_with_random) { + boost::mpi::communicator world; + + const int rows = 500; + const int cols = 500; + std::vector> global_matr; + std::vector global_max(cols, INT_MIN); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(global_max, max_example); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_500_1000_columns_with_random) { + boost::mpi::communicator world; + + const int rows = 500; + const int cols = 1000; + std::vector> global_matr; + std::vector global_max(cols, INT_MIN); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + 
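+  // run() distributes the rows, takes per-process column maxima and merges them on rank 0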
testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < cols; i++) { + ASSERT_EQ(global_max[i], max_example[i]); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Max_val_of_1000_3000_columns_with_random) { + boost::mpi::communicator world; + + const int rows = 1000; + const int cols = 3000; + std::vector> global_matr; + std::vector global_max(cols, INT_MIN); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(rows, cols); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataPar->inputs_count = {rows, cols}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector max_example(cols, INT_MIN); + + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matr.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matr[i].data())); + } + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(max_example.data())); + taskDataSeq->outputs_count.emplace_back(max_example.size()); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int i = 0; i < cols; i++) { + ASSERT_EQ(global_max[i], max_example[i]); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_size_of_input) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(2); + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs.push_back(reinterpret_cast(new int[6])); + taskDataPar->outputs_count.push_back(2); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, Incorrect_val_of_output) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { 
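+    // the output buffer is missing and its declared size (2) does not match cols (15), so validation() must fail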
+ taskDataPar->inputs_count.push_back(10); + taskDataPar->inputs_count.push_back(15); + taskDataPar->inputs.push_back(reinterpret_cast(new int[150])); + taskDataPar->outputs_count.push_back(2); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..702922761ca --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gordeva_t_max_val_of_column_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::vector rand_vec(int s, int down = -100, int upp = 100); + static std::vector> rand_matr(int rows, int cols); + + private: + std::vector> input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_, local_input_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace gordeva_t_max_val_of_column_matrix_mpi diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..462eb21ea28 --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_matr; + std::vector max_s; + + std::shared_ptr taskDataPar = std::make_shared(); + int rows = 5000; + int cols = 5000; + + if (world.rank() == 0) { + global_matr = gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(rows, cols); + max_s.resize(cols, INT_MIN); + for (auto& i : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); + } + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); + taskDataPar->outputs_count.emplace_back(max_s.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < max_s.size(); ++j) { + ASSERT_EQ(max_s[j], 200); + } + } +} + +TEST(gordeva_t_max_val_of_column_matrix_mpi, test_task_run) { + boost::mpi::communicator world; + + std::vector> global_matr; + std::vector max_s; + std::shared_ptr taskDataPar = std::make_shared(); + int rows = 7000; + int cols = 7000; + + if (world.rank() == 0) { + global_matr = 
gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(rows, cols); + max_s.resize(cols, INT_MIN); + + for (auto& i : global_matr) { + taskDataPar->inputs.emplace_back(reinterpret_cast(i.data())); + } + + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(max_s.data())); + taskDataPar->outputs_count.emplace_back(max_s.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + if (world.rank() == 0) { + for (size_t j = 0; j < max_s.size(); ++j) { + ASSERT_EQ(max_s[j], 200); + } + } +} diff --git a/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp b/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..01d16fa9689 --- /dev/null +++ b/tasks/mpi/gordeva_t_max_val_of_column_matrix/src/ops_mpi.cpp @@ -0,0 +1,188 @@ +#include "mpi/gordeva_t_max_val_of_column_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_matr = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; + } + + res.resize(cols); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + int max_el = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) + if (input_[j][i] > max_el) max_el = input_[j][i]; + + res[i] = max_el; + } + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + + int* output_matr = reinterpret_cast(taskData->outputs[0]); + + std::copy(res.begin(), res.end(), output_matr); + return true; +} + +std::vector gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_vec(int s, int down, int upp) { + std::vector v(s); + for (auto& i : v) i = down + (std::rand() % (upp - down + 1)); + return v; +} + +std::vector> gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskSequential::rand_matr(int rows, + int cols) { + std::vector> matr(rows, std::vector(cols)); + + for (int i = 0; i < rows; ++i) { + matr[i] = rand_vec(cols, -500, 500); + } + for (int j = 0; j < cols; ++j) { + int row_rand = std::rand() % rows; + matr[row_rand][j] = 10; + } + return matr; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + + if (world.rank() 
== 0) { + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + } + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + size_t delta = 0; + size_t delta_1 = 0; + size_t local_cols = 0; + + if (world.rank() == 0) { + size_t rows = taskData->inputs_count[0]; + size_t cols = taskData->inputs_count[1]; + + delta = rows / world.size(); + delta_1 = rows % world.size(); + + boost::mpi::broadcast(world, delta, 0); + boost::mpi::broadcast(world, delta_1, 0); + + input_.resize(rows, std::vector(cols)); + for (size_t i = 0; i < rows; i++) { + int* input_matr = reinterpret_cast(taskData->inputs[i]); + input_[i].assign(input_matr, input_matr + cols); + } + + for (int proc = 1; proc < world.size(); ++proc) { + size_t start_row = (proc * delta) + std::min(static_cast(proc), delta_1); + size_t rows_to_send = delta + ((static_cast(proc) < delta_1) ? 1 : 0); + + world.send(proc, 0, cols); + + for (size_t i = 0; i < rows_to_send; ++i) { + world.send(proc, 0, input_[start_row + i]); + } + } + + size_t local_input_rows = delta + ((static_cast(world.rank()) < delta_1) ? 1 : 0); + local_cols = cols; + local_input_.assign(input_.begin(), std::next(input_.begin(), static_cast(local_input_rows))); + } else { + boost::mpi::broadcast(world, delta, 0); + boost::mpi::broadcast(world, delta_1, 0); + + size_t local_input_rows = delta + (static_cast(world.rank()) < delta_1 ? 1 : 0); + + world.recv(0, 0, local_cols); + + local_input_.resize(local_input_rows, std::vector(local_cols)); + for (size_t i = 0; i < local_input_rows; ++i) { + world.recv(0, 0, local_input_[i]); + } + } + + res.resize(local_cols); + std::vector tmp_max(local_cols, INT_MIN); + + for (size_t i = 0; i < local_cols; ++i) { + for (size_t j = 0; j < local_input_.size(); ++j) { + tmp_max[i] = std::max(tmp_max[i], local_input_[j][i]); + } + } + + if (world.rank() == 0) { + std::vector max_s(local_cols, INT_MIN); + std::copy(tmp_max.begin(), tmp_max.end(), max_s.begin()); + + for (int proc = 1; proc < world.size(); ++proc) { + std::vector proc_max(local_cols); + world.recv(proc, 0, proc_max); + + for (size_t i = 0; i < local_cols; ++i) { + max_s[i] = std::max(max_s[i], proc_max[i]); + } + } + res = max_s; + } else { + world.send(0, 0, tmp_max); + } + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + std::copy(res.begin(), res.end(), reinterpret_cast(taskData->outputs[0])); + } + return true; +} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..0b149b9d14e --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/func_tests/main.cpp @@ -0,0 +1,149 @@ +#include + +#include +#include + +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyInput) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + 
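+// IsEmptyOutput below attaches an input matrix but no output buffer; validation() must still fail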
+TEST(gordeva_t_max_val_of_column_matrix_seq, IsEmptyOutput) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(5); + taskDataSeq->inputs_count.push_back(5); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[25])); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_500_columns_with_random) { + const int rows = 500; + const int cols = 500; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_500_1000_columns_with_random) { + const int rows = 500; + const int cols = 1000; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Max_val_of_1000_3000_columns_with_random) { + const int rows = 1000; + const int cols = 3000; + + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(rows, cols); + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataSeq->outputs_count.emplace_back(res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + 
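+  // pre_processing() loads the 1000x3000 matrix into input_; run() then scans each column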
ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_el = matrix[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix[i][j] > max_el) { + max_el = matrix[i][j]; + } + } + ASSERT_EQ(res[j], max_el); + } +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_size_of_input) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(10); + taskDataSeq->inputs_count.push_back(0); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[10])); + taskDataSeq->outputs_count.push_back(1); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, Incorrect_val_of_output) { + std::shared_ptr taskDataSeq = std::make_shared(); + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(10); + taskDataSeq->inputs_count.push_back(15); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[150])); + taskDataSeq->outputs_count.push_back(10); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..577b7194572 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include "core/task/include/task.hpp" + +namespace gordeva_t_max_val_of_column_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::vector rand_vec(int size, int down = -100, int upp = 100); + static std::vector> rand_matr(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace gordeva_t_max_val_of_column_matrix_seq diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..2ee62e9c724 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/perf_tests/main.cpp @@ -0,0 +1,79 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +TEST(gordeva_t_max_val_of_column_matrix_seq, test_pipeline_run) { + const int cols = 5000; + const int rows = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(rows, cols); + + for (auto &i : matrix) taskDataSeq->inputs.emplace_back(reinterpret_cast(i.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector res_vec(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); + taskDataSeq->outputs_count.emplace_back(res_vec.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 
= std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); +} + +TEST(gordeva_t_max_val_of_column_matrix_seq, test_task_run) { + const int cols = 7000; + const int rows = 7000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matr_rand = + gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(rows, cols); + for (auto &row : matr_rand) taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector res_vec(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(res_vec.data())); + taskDataSeq->outputs_count.emplace_back(res_vec.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (int i = 0; i < cols; i++) ASSERT_EQ(res_vec[i], 200); +} diff --git a/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp b/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..6a4b610fb02 --- /dev/null +++ b/tasks/seq/gordeva_t_max_val_of_column_matrix/src/ops_seq.cpp @@ -0,0 +1,80 @@ +#include "seq/gordeva_t_max_val_of_column_matrix/include/ops_seq.hpp" + +#include + +namespace gordeva_t_max_val_of_column_matrix_seq { + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + int* input_matr; + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + input_matr = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) input_[i][j] = input_matr[j]; + } + + res_.resize(cols); + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + if (taskData->inputs.empty() || taskData->outputs.empty()) return false; + if (taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) return false; + if (taskData->outputs_count.size() != 1) return false; + if (taskData->inputs_count.size() < 2) return false; + if (taskData->outputs_count[0] != taskData->inputs_count[1]) return false; + + return true; +} + +bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_[0].size(); i++) { + int max_el = input_[0][i]; + for (size_t j = 1; j < input_.size(); j++) + if (input_[j][i] > max_el) max_el = input_[j][i]; + + res_[i] = max_el; + } + + 
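+  // res_ now holds the maximum of every column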
+  return true;
+}
+
+bool gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  int* output_matr = reinterpret_cast<int*>(taskData->outputs[0]);
+
+  std::copy(res_.begin(), res_.end(), output_matr);
+  return true;
+}
+
+std::vector<int> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_vec(int size, int down, int upp) {
+  std::vector<int> v(size);
+  for (auto& number : v) number = down + (std::rand() % (upp - down + 1));
+  return v;
+}
+
+std::vector<std::vector<int>> gordeva_t_max_val_of_column_matrix_seq::TestTaskSequential::rand_matr(int rows,
+                                                                                                    int cols) {
+  std::vector<std::vector<int>> matr(rows, std::vector<int>(cols));
+
+  for (int i = 0; i < rows; ++i) {
+    matr[i] = rand_vec(cols, -500, 500);
+  }
+  for (int j = 0; j < cols; ++j) {
+    int row_rand = std::rand() % rows;
+    matr[row_rand][j] = 10;
+  }
+  return matr;
+}
+
+}  // namespace gordeva_t_max_val_of_column_matrix_seq

From 28f485f524c4fe81b974c521e26b2aabc7eae892 Mon Sep 17 00:00:00 2001
From: andrewmeow <91527374+andrewmeow@users.noreply.github.com>
Date: Sat, 9 Nov 2024 06:02:51 +0300
Subject: Zolotareva Arina. Task 1. Variant 24. Counting the number of words
 in a string. (#198)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Description of the sequential task:
We create a string that holds the text and initialize a word counter and an in_word flag that shows whether we are currently inside a word. We walk over every character of the string: if the character is a space, we reset the in_word flag to indicate that we have left the word. If the character is not a space and the in_word flag is reset, a new word is starting, so we increment the word counter and set in_word to true.

Description of the parallel task:
We create a string that holds the text and split it into several parts depending on the number of available processes (delta). For each process we determine the share of the string it will handle. Each process counts the words in its part of the string. We also account for the fact that a word may reach a process split in half. The results from each process are then gathered and summed to obtain the total word count.
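For reference, the counting loop described above boils down to this standalone sketch
(plain C++, independent of the task classes; only ' ' is treated as a separator):

    #include <string>

    int count_words(const std::string& s) {
      int words = 0;
      bool in_word = false;
      for (char c : s) {
        if (c == ' ') {
          in_word = false;   // a space ends the current word
        } else if (!in_word) {
          ++words;           // first non-space after a gap opens a new word
          in_word = true;
        }
      }
      return words;
    }

so count_words("  Hello   World  ") returns 2, matching the tests below.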
--- .../func_tests/main.cpp | 81 +++++++++++++ .../include/ops_mpi.hpp | 45 +++++++ .../perf_tests/main.cpp | 97 +++++++++++++++ .../src/ops_mpi.cpp | 105 +++++++++++++++++ .../func_tests/main.cpp | 111 ++++++++++++++++++ .../include/ops_seq.hpp | 24 ++++ .../perf_tests/main.cpp | 89 ++++++++++++++ .../src/ops_seq.cpp | 37 ++++++ 8 files changed, 589 insertions(+) create mode 100644 tasks/mpi/zolotareva_a_count_of_words/func_tests/main.cpp create mode 100644 tasks/mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp create mode 100644 tasks/mpi/zolotareva_a_count_of_words/perf_tests/main.cpp create mode 100644 tasks/mpi/zolotareva_a_count_of_words/src/ops_mpi.cpp create mode 100644 tasks/seq/zolotareva_a_count_of_words/func_tests/main.cpp create mode 100644 tasks/seq/zolotareva_a_count_of_words/include/ops_seq.hpp create mode 100644 tasks/seq/zolotareva_a_count_of_words/perf_tests/main.cpp create mode 100644 tasks/seq/zolotareva_a_count_of_words/src/ops_seq.cpp diff --git a/tasks/mpi/zolotareva_a_count_of_words/func_tests/main.cpp b/tasks/mpi/zolotareva_a_count_of_words/func_tests/main.cpp new file mode 100644 index 00000000000..d5900db8ebd --- /dev/null +++ b/tasks/mpi/zolotareva_a_count_of_words/func_tests/main.cpp @@ -0,0 +1,81 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp" + +void form(std::string &&str) { + boost::mpi::communicator world; + std::string global_string; + size_t global_count = 0; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_string = str; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_string.data())); + taskDataPar->inputs_count.emplace_back(global_string.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&global_count)); + taskDataPar->outputs_count.emplace_back(1); + } + + zolotareva_a_count_of_words_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + size_t reference_count = 0; + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_string.data())); + taskDataSeq->inputs_count.emplace_back(global_string.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&reference_count)); + taskDataSeq->outputs_count.emplace_back(1); + + zolotareva_a_count_of_words_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count, global_count); + } +} +TEST(zolotareva_a_count_of_words_mpi, Test_Count_Words) { form("1"); } +TEST(zolotareva_a_count_of_words_mpi, Test_Two_Words) { form("Hello World"); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Leading_Trailing_Spaces) { form(" Hello World "); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Only_Spaces) { form(" "); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Complex_Sentence) { form("Multiple Spaces Between Words"); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Numbers_And_Spaces) { form(" 1 2 3 4 5 "); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Multiple_Consecutive_Letters) { form("A B C D E F G H I J"); } + +TEST(zolotareva_a_count_of_words_mpi, Test_Long_String) { + form( + "This is a very long string that 
contains many words spaces and punctuation marks to ensure that the count works " + "properly"); +} + +TEST(zolotareva_a_count_of_words_mpi, Test_Very_Long_String) { + form( + "My parents are very good and kind. They are young. My mother's name is Natalia. She is thirty years old. She is " + "a doctor. My mother is very beautiful. My father's name is Victor. He is thirty-two years old. He is an " + "engineer. I love my parents very much."); +} +TEST(zolotareva_a_count_of_words_mpi, Test_Very_Very_Long_String) { + form( + "Children start school at the age of five, but there is some free nursery-school education before that age. The " + "state nursery schools are not for all. They are for some families, for example for families with only one " + "parent. In most areas there are private nursery schools. Parents who want their children to go to nursery " + "school pay for their children under 5 years old to go to these private nursery schools.Some parents prefer " + "private education. In England and Wales, private schools are called public schools. They are very expensive. " + "Only 5 per cent of the school population goes to public schools. Public schools are for pupils from 5 or 7 to " + "18 years old. Some public schools are day schools, but many public schools are boarding schools. Pupils live in " + "the school and go home in the holidays."); +} \ No newline at end of file diff --git a/tasks/mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp b/tasks/mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp new file mode 100644 index 00000000000..2f59231672b --- /dev/null +++ b/tasks/mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp @@ -0,0 +1,45 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace zolotareva_a_count_of_words_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + std::string local_input_; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace zolotareva_a_count_of_words_mpi \ No newline at end of file diff --git a/tasks/mpi/zolotareva_a_count_of_words/perf_tests/main.cpp b/tasks/mpi/zolotareva_a_count_of_words/perf_tests/main.cpp new file mode 100644 index 00000000000..135da9f19d4 --- /dev/null +++ b/tasks/mpi/zolotareva_a_count_of_words/perf_tests/main.cpp @@ -0,0 +1,97 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp" + +std::string getRandomString(int size, size_t *count_words) { + const std::string ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + std::string res; + *count_words = 1; + for (int i = 0; i < size; i++) { + int x = rand() % (ABC.size() + 1); + res += ABC[x]; + if (rand() % 2 == 1 && i < size - 1) { + res += " "; + (*count_words) += 1; + } + } + return res; +} + +TEST(mpi_zolotareva_a_count_of_words_perf_test, 
test_pipeline_run) { + boost::mpi::communicator world; + std::string global_string; + size_t answer = 0; + size_t global_count = 0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_string = getRandomString(1000, &answer); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_string.data())); + taskDataPar->inputs_count.emplace_back(global_string.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&global_count)); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, global_count); + } +} + +TEST(mpi_zolotareva_a_count_of_words_perf_test, test_task_run) { + boost::mpi::communicator world; + std::string global_string; + size_t answer = 0; + size_t global_count = 0; + + global_string = getRandomString(1000, &answer); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_string = getRandomString(1000, &answer); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_string.data())); + taskDataPar->inputs_count.emplace_back(global_string.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&global_count)); + taskDataPar->outputs_count.emplace_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, global_count); + } +} diff --git a/tasks/mpi/zolotareva_a_count_of_words/src/ops_mpi.cpp b/tasks/mpi/zolotareva_a_count_of_words/src/ops_mpi.cpp new file mode 100644 index 00000000000..b24a3b1bf16 --- /dev/null +++ b/tasks/mpi/zolotareva_a_count_of_words/src/ops_mpi.cpp @@ -0,0 +1,105 @@ +// Copyright 2023 Nesterov Alexander +// здесь писать саму задачу +#include "mpi/zolotareva_a_count_of_words/include/ops_mpi.hpp" + +#include + +bool zolotareva_a_count_of_words_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + res = 0; + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return (taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0); +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskSequential::run() { + internal_order_test(); + bool in_word = false; + for (char c : input_) { + if (c == ' ' && in_word) { + ++res; + in_word = false; + } else if 
(c != ' ') { + in_word = true; + } + } + if (in_word) ++res; + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return (taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0); + } + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + int world_size = world.size(); + unsigned int delta; + if (world.rank() == 0) delta = taskData->inputs_count[0] / world_size; + if (world_size == 1) { + local_input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + return true; + } + boost::mpi::broadcast(world, delta, 0); + + if (world.rank() == 0) { + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + unsigned int str_size = taskData->inputs_count[0]; + unsigned int remainder = str_size % world_size; + local_input_ = input_.substr(0, remainder + delta); + for (int proc = 1; proc < world_size; proc++) { + world.send(proc, 0, input_.data() + remainder + proc * delta - 1, delta + 1); + } + } else { + local_input_ = std::string(delta + 1, '\0'); + world.recv(0, 0, local_input_.data(), delta + 1); + } + res = 0; + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int local_res = 0; + bool in_word = false; + + for (char c : local_input_) { + if (c == ' ' && in_word) { + ++local_res; + in_word = false; + } else if (c != ' ') { + in_word = true; + } + } + if (in_word) ++local_res; + if (world.rank() != 0 && local_input_[0] != ' ' && in_word) { + --local_res; + } + if (world.rank() == (world.size() - 1) && local_input_[0] != ' ' && !in_word) { + --local_res; + } + boost::mpi::reduce(world, local_res, res, std::plus(), 0); + return true; +} + +bool zolotareva_a_count_of_words_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/zolotareva_a_count_of_words/func_tests/main.cpp b/tasks/seq/zolotareva_a_count_of_words/func_tests/main.cpp new file mode 100644 index 00000000000..22376105a99 --- /dev/null +++ b/tasks/seq/zolotareva_a_count_of_words/func_tests/main.cpp @@ -0,0 +1,111 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/zolotareva_a_count_of_words/include/ops_seq.hpp" + +TEST(zolotareva_a_count_of_words_seq, Test_Empty) { + const std::string input = " "; + const int count = 0; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zolotareva_a_count_of_words_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(zolotareva_a_count_of_words_seq, Test_Count_Words_1) { + const int count = 1; + const std::string input = "Hello"; + + std::vector in(input.begin(), 
input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zolotareva_a_count_of_words_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(zolotareva_a_count_of_words_seq, Test_Count_Words_5) { + const int count = 5; + const std::string input = "1 2 3 4 5"; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zolotareva_a_count_of_words_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(zolotareva_a_count_of_words_seq, Test_Count_Words_2_With_Spaces) { + const int count = 2; + const std::string input = " Hello there "; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zolotareva_a_count_of_words_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(count, out[0]); +} + +TEST(zolotareva_a_count_of_words_seq, Test_Count_Words_With_Special_Char) { + const int count = 2; + const std::string input = "Hello, word! 
"; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + zolotareva_a_count_of_words_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/zolotareva_a_count_of_words/include/ops_seq.hpp b/tasks/seq/zolotareva_a_count_of_words/include/ops_seq.hpp new file mode 100644 index 00000000000..c53ac161ca5 --- /dev/null +++ b/tasks/seq/zolotareva_a_count_of_words/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace zolotareva_a_count_of_words_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int res{}; +}; + +} // namespace zolotareva_a_count_of_words_seq \ No newline at end of file diff --git a/tasks/seq/zolotareva_a_count_of_words/perf_tests/main.cpp b/tasks/seq/zolotareva_a_count_of_words/perf_tests/main.cpp new file mode 100644 index 00000000000..47c9a01f1fa --- /dev/null +++ b/tasks/seq/zolotareva_a_count_of_words/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/zolotareva_a_count_of_words/include/ops_seq.hpp" + +TEST(sequential_zolotareva_a_count_of_words_perf_test, test_pipeline_run) { + const int count = 162; + const std::string input = + "I was too young to be other than awed and puzzled by Doc Marlowe when I knew him. I was only sixteen when he " + "died. He was sixty-seven. There was that vast difference in our ages and there was a vaster difference in our " + "backgrounds. Doc Marlowe was a medicine-show man. He had been a lot of other things, too: a circus man, the " + "proprietor of a concession at Coney Island, a saloon-keeper; but in his fifties he had travelled around with a " + "tent-show troupe made up of a Mexican named Chickalilli, who threw knives, and a man called Professor Jones, " + "who played the banjo. Doc Marlowe would come out after the entertainment and harangue the crowd and sell " + "bottles of medicine for all kinds of ailments. I found out all this about him gradually, toward the last, and " + "after he died. 
When I first knew him, he represented the Wild West to me, and there was nobody I admired so " + "much."; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count, out[0]); +} + +TEST(sequential_zolotareva_a_count_of_words_perf_test, test_task_run) { + const int count = 162; + const std::string input = + "I was too young to be other than awed and puzzled by Doc Marlowe when I knew him. I was only sixteen when he " + "died. He was sixty-seven. There was that vast difference in our ages and there was a vaster difference in our " + "backgrounds. Doc Marlowe was a medicine-show man. He had been a lot of other things, too: a circus man, the " + "proprietor of a concession at Coney Island, a saloon-keeper; but in his fifties he had travelled around with a " + "tent-show troupe made up of a Mexican named Chickalilli, who threw knives, and a man called Professor Jones, " + "who played the banjo. Doc Marlowe would come out after the entertainment and harangue the crowd and sell " + "bottles of medicine for all kinds of ailments. I found out all this about him gradually, toward the last, and " + "after he died. 
When I first knew him, he represented the Wild West to me, and there was nobody I admired so " + "much."; + + std::vector in(input.begin(), input.end()); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/zolotareva_a_count_of_words/src/ops_seq.cpp b/tasks/seq/zolotareva_a_count_of_words/src/ops_seq.cpp new file mode 100644 index 00000000000..5b41212dccd --- /dev/null +++ b/tasks/seq/zolotareva_a_count_of_words/src/ops_seq.cpp @@ -0,0 +1,37 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/zolotareva_a_count_of_words/include/ops_seq.hpp" + +#include + +bool zolotareva_a_count_of_words_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool zolotareva_a_count_of_words_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_.assign(reinterpret_cast(taskData->inputs[0]), taskData->inputs_count[0]); + res = 0; + return true; +} + +bool zolotareva_a_count_of_words_seq::TestTaskSequential::run() { + internal_order_test(); + + bool in_word = false; + for (char c : input_) { + if (c == ' ') + in_word = false; + else if (!in_word) { + ++res; + in_word = true; + } + } + return true; +} + +bool zolotareva_a_count_of_words_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} From afe3876f3dae76e15c4627216b8478f272a18c76 Mon Sep 17 00:00:00 2001 From: Artem Z <67142290+aortemon@users.noreply.github.com> Date: Sat, 9 Nov 2024 06:03:42 +0300 Subject: [PATCH 153/155] =?UTF-8?q?=D0=97=D0=B0=D0=B9=D1=86=D0=B5=D0=B2=20?= =?UTF-8?q?=D0=90=D1=80=D1=82=D0=B5=D0=BC.=20=D0=97=D0=B0=D0=B4=D0=B0?= =?UTF-8?q?=D1=87=D0=B0=201.=20=D0=92=D0=B0=D1=80=D0=B8=D0=B0=D0=BD=D1=82?= =?UTF-8?q?=204.=20=D0=9C=D0=B8=D0=BD=D0=B8=D0=BC=D0=B0=D0=BB=D1=8C=D0=BD?= =?UTF-8?q?=D1=8B=D0=B9=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5=D0=BD=D1=82=20?= =?UTF-8?q?=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0.=20(#204)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Последовательная реализация Принять, что первый элемент вектора, является текущим минимальным найденным. Сравнить текущий минимальный элемент с очередным непроверенным элементом вектора, начиная со второго. При достижении конца вектора текущий минимальный элемент будет минимальным элементом во всем векторе. ### Параллельная реализация Разбить вектор на подвекторы длины, равной соотношению длины исходного вектора к количеству выделенных потоков. 
---
 .../func_tests/main.cpp | 208 ++++++++++++++++++
 .../include/ops_mpi.hpp |  47 ++++
 .../perf_tests/main.cpp | 113 ++++++++++
 .../src/ops_mpi.cpp     |  99 +++++++++
 .../func_tests/main.cpp | 134 +++++++++++
 .../include/ops_seq.hpp |  24 ++
 .../perf_tests/main.cpp | 106 +++++++++
 .../src/ops_seq.cpp     |  36 +++
 8 files changed, 767 insertions(+)
 create mode 100644 tasks/mpi/zaitsev_a_min_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/zaitsev_a_min_of_vector_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/zaitsev_a_min_of_vector_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/zaitsev_a_min_of_vector_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/zaitsev_a_min_of_vector_elements/func_tests/main.cpp b/tasks/mpi/zaitsev_a_min_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..6095659a9b3
--- /dev/null
+++ b/tasks/mpi/zaitsev_a_min_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,208 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp"
+
+namespace zaitsev_a_min_of_vector_elements_mpi {
+std::vector<int> getRandomVector(int sz, int minRangeValue, int maxRangeValue) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1);
+  }
+  return vec;
+}
+}  // namespace zaitsev_a_min_of_vector_elements_mpi
+
+TEST(zaitsev_a_min_of_vector_elements_mpi, test_case_even_length_vector) {
+  const int extrema = -105;
+  const int minRangeValue = -100;
+  const int maxRangeValue = 1000;
+
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int32_t> global_min(1, maxRangeValue + 1);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 10e2;
+    global_vec = zaitsev_a_min_of_vector_elements_mpi::getRandomVector(count_size_vector, minRangeValue, maxRangeValue);
+    global_vec[global_vec.size() / 2] = extrema;
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int32_t> reference(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference.data()));
taskDataSeq->outputs_count.emplace_back(reference.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference[0], global_min[0]); + } +} + +TEST(zaitsev_a_min_of_vector_elements_mpi, test_case_odd_length_vector) { + const int extrema = -105; + const int minRangeValue = -100; + const int maxRangeValue = 1000; + + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, maxRangeValue + 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10e2 + 1; + global_vec = zaitsev_a_min_of_vector_elements_mpi::getRandomVector(count_size_vector, minRangeValue, maxRangeValue); + global_vec[global_vec.size() / 2] = extrema; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference[0], global_min[0]); + } +} + +TEST(zaitsev_a_min_of_vector_elements_mpi, test_case_empty_vector) { + const int minRangeValue = -100; + const int maxRangeValue = 1000; + + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, maxRangeValue + 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 0; + global_vec = zaitsev_a_min_of_vector_elements_mpi::getRandomVector(count_size_vector, minRangeValue, maxRangeValue); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(zaitsev_a_min_of_vector_elements_mpi, test_case_singleton) { + const int extrema = -1; + + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 31415); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1; + global_vec = std::vector(count_size_vector, extrema); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference.data())); + taskDataSeq->outputs_count.emplace_back(reference.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference[0], global_min[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..d6d30784a11 --- /dev/null +++ b/tasks/mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace zaitsev_a_min_of_vector_elements_mpi { + +std::vector getRandomVector(int sz, int minRangeValue, int maxRangeValue); + +class MinOfVectorElementsSequential : public ppc::core::Task { + public: + explicit MinOfVectorElementsSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input{}; + int res{}; +}; + +class MinOfVectorElementsParallel : public ppc::core::Task { + public: + explicit MinOfVectorElementsParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace 
zaitsev_a_min_of_vector_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..fa60eb12945 --- /dev/null +++ b/tasks/mpi/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,113 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp" + +namespace zaitsev_a_min_of_vector_elements_mpi { +std::vector getRandomVector(int sz, int minRangeValue, int maxRangeValue) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + } + return vec; +} +} // namespace zaitsev_a_min_of_vector_elements_mpi + +TEST(zaitsev_a_min_of_vector_elements_mpi, test_pipeline_run) { + const int extrema = -1000; + const int minRangeValue = -500; + const int maxRangeValue = 500; + + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, maxRangeValue + 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10e6; + global_vec = zaitsev_a_min_of_vector_elements_mpi::getRandomVector(count_size_vector, minRangeValue, maxRangeValue); + global_vec[global_vec.size() / 2] = extrema; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto minOfVectorElementsParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(minOfVectorElementsParallel->validation(), true); + minOfVectorElementsParallel->pre_processing(); + minOfVectorElementsParallel->run(); + minOfVectorElementsParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(minOfVectorElementsParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(extrema, global_min[0]); + } +} + +TEST(zaitsev_a_min_of_vector_elements_mpi, test_task_run) { + const int extrema = -1000; + const int minRangeValue = -500; + const int maxRangeValue = 500; + + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, maxRangeValue + 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10e6; + global_vec = zaitsev_a_min_of_vector_elements_mpi::getRandomVector(count_size_vector, minRangeValue, maxRangeValue); + global_vec[count_size_vector / 2] = extrema; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + auto minOfVectorElementsParallel = + 
std::make_shared(taskDataPar); + ASSERT_EQ(minOfVectorElementsParallel->validation(), true); + minOfVectorElementsParallel->pre_processing(); + minOfVectorElementsParallel->run(); + minOfVectorElementsParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(minOfVectorElementsParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(extrema, global_min[0]); + } +} diff --git a/tasks/mpi/zaitsev_a_min_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/zaitsev_a_min_of_vector_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..a1edc120a45 --- /dev/null +++ b/tasks/mpi/zaitsev_a_min_of_vector_elements/src/ops_mpi.cpp @@ -0,0 +1,99 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/zaitsev_a_min_of_vector_elements/include/ops_mpi.hpp" + +#include +#include +#include + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential::pre_processing() { + internal_order_test(); + + // Init value for input and output + input = std::vector(taskData->inputs_count[0]); + auto* interpreted_input = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < taskData->inputs_count[0]; i++) { + input[i] = interpreted_input[i]; + } + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs_count[0] != 0 && taskData->outputs_count[0] == 1) || + (taskData->inputs_count[0] == 0 && taskData->outputs_count[0] == 0); +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential::run() { + internal_order_test(); + + int currentMin = input[0]; + for (auto i : input) currentMin = (currentMin > i) ? 
i : currentMin; + res = currentMin; + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], input_.begin()); + } + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); proc++) { + world.send(proc, 0, input_.data() + (proc - 1) * delta, delta); + } + } + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin() + (delta * (world.size() - 1)), input_.end()); + } else { + local_input_ = std::vector(delta); + world.recv(0, 0, local_input_.data(), delta); + } + + int local_res = INT_MAX; + if (!local_input_.empty()) { + local_res = local_input_[0]; + for (auto i : local_input_) local_res = (local_res > i) ? i : local_res; + } + reduce(world, local_res, res, boost::mpi::minimum(), 0); + + return true; +} + +bool zaitsev_a_min_of_vector_elements_mpi::MinOfVectorElementsParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/seq/zaitsev_a_min_of_vector_elements/func_tests/main.cpp b/tasks/seq/zaitsev_a_min_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..d74f10a842b --- /dev/null +++ b/tasks/seq/zaitsev_a_min_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,134 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp" + +using namespace std::chrono_literals; + +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_length_10) { + const int length = 10; + const int extrema = -1; + const int minRangeValue = 100; + const int maxRangeValue = 1000; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length); + for (size_t i = 0; i < length; i++) { + int j = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + in[i] = j; + } + in[length / 2] = extrema; + + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential task(taskDataSeq); + ASSERT_EQ(task.validation(), true); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(extrema, out[0]); +} +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_length_50) { + const int length = 50; + const int extrema = -1; + const int 
minRangeValue = 100; + const int maxRangeValue = 1000; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length); + for (size_t i = 0; i < length; i++) { + int j = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + in[i] = j; + } + in[length / 2] = extrema; + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential task(taskDataSeq); + ASSERT_EQ(task.validation(), true); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(extrema, out[0]); +} + +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_length_1) { + const int length = 1; + const int extrema = -1; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length, extrema); + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential task(taskDataSeq); + ASSERT_EQ(task.validation(), true); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(extrema, out[0]); +} + +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_vector_of_negative_elements) { + const int length = 10; + const int extrema = -105; + const int minRangeValue = -100; + const int maxRangeValue = -1; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length); + for (size_t i = 0; i < length; i++) { + int j = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + in[i] = j; + } + in[length / 2] = extrema; + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential task(taskDataSeq); + ASSERT_EQ(task.validation(), true); + task.pre_processing(); + task.run(); + task.post_processing(); + ASSERT_EQ(extrema, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp b/tasks/seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..96fbdf5b246 --- /dev/null +++ b/tasks/seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace zaitsev_a_min_of_vector_elements_seq { + +class MinOfVectorElementsSequential : public ppc::core::Task { + public: + explicit MinOfVectorElementsSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input{}; + int res{}; +}; + +} // 
namespace zaitsev_a_min_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp b/tasks/seq/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..227c4c68aec --- /dev/null +++ b/tasks/seq/zaitsev_a_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp" + +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_pipeline_run) { + const int length = 10e6; + const int extrema = -105; + const int minRangeValue = -100; + const int maxRangeValue = 1000; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length); + for (size_t i = 0; i < length; i++) { + int j = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + in[i] = j; + } + in[length / 2] = extrema; + + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(extrema, out[0]); +} + +TEST(zaitsev_a_min_of_vector_elements_sequentional, test_task_run) { + const int length = 10e6; + const int extrema = -105; + const int minRangeValue = -100; + const int maxRangeValue = 1000; + + std::mt19937 gen(31415); + + // Create data + std::vector in(length); + for (size_t i = 0; i < length; i++) { + int j = minRangeValue + gen() % (maxRangeValue - minRangeValue + 1); + in[i] = j; + } + in[length / 2] = extrema; + + std::vector out(1, extrema); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + 
ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(extrema, out[0]);
+}

diff --git a/tasks/seq/zaitsev_a_min_of_vector_elements/src/ops_seq.cpp b/tasks/seq/zaitsev_a_min_of_vector_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..558fcdda27f
--- /dev/null
+++ b/tasks/seq/zaitsev_a_min_of_vector_elements/src/ops_seq.cpp
@@ -0,0 +1,36 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/zaitsev_a_min_of_vector_elements/include/ops_seq.hpp"
+
+bool zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential::pre_processing() {
+  internal_order_test();
+
+  // Init value for input and output
+  input = std::vector<int>(taskData->inputs_count[0]);
+  auto* interpreted_input = reinterpret_cast<int*>(taskData->inputs[0]);
+  for (size_t i = 0; i < taskData->inputs_count[0]; i++) {
+    input[i] = interpreted_input[i];
+  }
+  return true;
+}
+
+bool zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->inputs_count[0] != 0 && taskData->outputs_count[0] == 1) ||
+         (taskData->inputs_count[0] == 0 && taskData->outputs_count[0] == 0);
+}
+
+bool zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential::run() {
+  internal_order_test();
+
+  int currentMin = input[0];
+  for (auto i : input) currentMin = (currentMin > i) ? i : currentMin;
+  res = currentMin;
+  return true;
+}
+
+bool zaitsev_a_min_of_vector_elements_seq::MinOfVectorElementsSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}

From 35695e9c5b1c7026d6c479e9c13e4a901cf8400a Mon Sep 17 00:00:00 2001
From: Bogdan__ <129730277+BogdanNaumov@users.noreply.github.com>
Date: Sat, 9 Nov 2024 06:04:34 +0300
Subject: [PATCH 154/155] Naumov Bogdan. Task 1. Variant 18. Finding the
 minimum values over the columns of a matrix. (#221)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The task is to find the minimum value in every column of an integer matrix. The input is a two-dimensional array of numbers; the program's result must be a one-dimensional array containing the minimum element of each column.

Sequential version of the algorithm:
During pre-processing the input data is converted into a two-dimensional array whose rows and columns follow the original dimensions. The result array is initialized with a sentinel minimum for every column. The algorithm walks through each column and finds its minimum element by comparing the column's values. After the algorithm finishes, the result is stored in the output array.

MPI version of the algorithm:
Data distribution: the process with rank 0 receives the source matrix and splits it by columns among all processes. Each process receives its own share of columns, with the extra columns handed out when the number of columns is not evenly divisible by the number of processes.
Computation: each process looks for the minimum values within its share of columns, comparing the column elements and keeping the running minimum of every column.
Result collection: the process with rank 0 collects the partial results from all processes and merges them into the final array.
Result handling: once all results are collected, the per-column minima are stored in the output array. A small sketch of the per-process computation step is given below.
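A minimal sketch of the local computation step (illustrative only; the function name `column_minima` is invented here, and the block each process receives is assumed to be packed column by column, `cols_local` columns of `rows` elements, mirroring the send/recv packing described above):

    #include <algorithm>
    #include <limits>
    #include <vector>

    // Per-column minima of one column-major block: element (r, c) of the
    // local block lives at block[c * rows + r].
    std::vector<int> column_minima(const std::vector<int>& block, int rows, int cols_local) {
      std::vector<int> mins(cols_local, std::numeric_limits<int>::max());
      for (int c = 0; c < cols_local; ++c) {
        for (int r = 0; r < rows; ++r) {
          mins[c] = std::min(mins[c], block[c * rows + r]);
        }
      }
      return mins;
    }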
---------

Co-authored-by: bogdan
---
 .../func_tests/main.cpp                       | 192 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  44 ++++
 .../perf_tests/main.cpp                       |  93 +++++++++
 .../naumov_b_min_colum_matrix/src/ops_mpi.cpp | 153 ++++++++++++++
 .../func_tests/main.cpp                       | 152 ++++++++++++++
 .../include/ops_seq.hpp                       |  24 +++
 .../perf_tests/main.cpp                       |  83 ++++++++
 .../naumov_b_min_colum_matrix/src/ops_seq.cpp |  55 +++++
 8 files changed, 796 insertions(+)
 create mode 100644 tasks/mpi/naumov_b_min_colum_matrix/func_tests/main.cpp
 create mode 100644 tasks/mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/naumov_b_min_colum_matrix/perf_tests/main.cpp
 create mode 100644 tasks/mpi/naumov_b_min_colum_matrix/src/ops_mpi.cpp
 create mode 100644 tasks/seq/naumov_b_min_colum_matrix/func_tests/main.cpp
 create mode 100644 tasks/seq/naumov_b_min_colum_matrix/include/ops_seq.hpp
 create mode 100644 tasks/seq/naumov_b_min_colum_matrix/perf_tests/main.cpp
 create mode 100644 tasks/seq/naumov_b_min_colum_matrix/src/ops_seq.cpp

diff --git a/tasks/mpi/naumov_b_min_colum_matrix/func_tests/main.cpp b/tasks/mpi/naumov_b_min_colum_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..efc7b54c72f
--- /dev/null
+++ b/tasks/mpi/naumov_b_min_colum_matrix/func_tests/main.cpp
@@ -0,0 +1,192 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <limits>
+
+#include "mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp"
+
+static std::vector<int> getRandomVector(int size) {
+  std::vector<int> vec(size);
+  for (int& element : vec) {
+    element = rand() % 201 - 100;
+  }
+  return vec;
+}
+
+TEST(naumov_b_min_colum_matrix_mpi, Test_Min_Column) {
+  boost::mpi::communicator world;
+  const int rows = 40;
+  const int cols = 60;
+  std::vector<int> global_matrix;
+  std::vector<int32_t> global_minima(cols, std::numeric_limits<int32_t>::max());
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = getRandomVector(cols * rows);
+
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_minima.data()));
+    taskDataPar->outputs_count.emplace_back(global_minima.size());
+  }
+
+  naumov_b_min_colum_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int32_t> reference_minima(cols, std::numeric_limits<int32_t>::max());
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_minima.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_minima.size());
+
+    naumov_b_min_colum_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(),
true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_minima, global_minima); + } +} + +TEST(naumov_b_min_colum_matrix_mpi, Test_Min_Column_10_40) { + boost::mpi::communicator world; + const int rows = 10; + const int cols = 40; + std::vector global_matrix; + std::vector global_minima(cols, std::numeric_limits::max()); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomVector(cols * rows); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minima.data())); + taskDataPar->outputs_count.emplace_back(global_minima.size()); + } + + naumov_b_min_colum_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_minima(cols, std::numeric_limits::max()); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_minima.data())); + taskDataSeq->outputs_count.emplace_back(reference_minima.size()); + + naumov_b_min_colum_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_minima, global_minima); + } +} + +TEST(naumov_b_min_colum_matrix_mpi, Test_Min_Column_40_10) { + boost::mpi::communicator world; + const int rows = 40; + const int cols = 10; + std::vector global_matrix; + std::vector global_minima(cols, std::numeric_limits::max()); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomVector(cols * rows); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minima.data())); + taskDataPar->outputs_count.emplace_back(global_minima.size()); + } + + naumov_b_min_colum_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_minima(cols, std::numeric_limits::max()); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_minima.data())); + taskDataSeq->outputs_count.emplace_back(reference_minima.size()); + + naumov_b_min_colum_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + 
testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_minima, global_minima); + } +} + +TEST(naumov_b_min_colum_matrix_mpi, Test_Min_Column_Large_Matrix) { + boost::mpi::communicator world; + const int rows = 100; + const int cols = 100; + std::vector global_matrix; + std::vector global_minima(cols, std::numeric_limits::max()); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = getRandomVector(cols * rows); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(rows); + taskDataPar->inputs_count.emplace_back(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_minima.data())); + taskDataPar->outputs_count.emplace_back(global_minima.size()); + } + + naumov_b_min_colum_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_minima(cols, std::numeric_limits::max()); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_minima.data())); + taskDataSeq->outputs_count.emplace_back(reference_minima.size()); + + naumov_b_min_colum_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_minima, global_minima); + } +} diff --git a/tasks/mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp b/tasks/mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..a18701916cd --- /dev/null +++ b/tasks/mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp @@ -0,0 +1,44 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace naumov_b_min_colum_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector local_vector_; + std::vector res; + boost::mpi::communicator world; +}; + +} // namespace naumov_b_min_colum_matrix_mpi diff --git a/tasks/mpi/naumov_b_min_colum_matrix/perf_tests/main.cpp b/tasks/mpi/naumov_b_min_colum_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..25e9509ab2f --- /dev/null +++ b/tasks/mpi/naumov_b_min_colum_matrix/perf_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Your Name +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp" + +static std::vector> getRandomMatrix(int rows, int 
columns) { + std::vector> matrix(rows, std::vector(columns)); + for (auto& row : matrix) { + for (int& element : row) { + element = rand() % 201 - 100; + } + } + return matrix; +} + +TEST(naumov_b_min_colum_matrix_mpi_perf, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_results; + + std::shared_ptr taskData = std::make_shared(); + int rows = 1000; + int cols = 1000; + if (world.rank() == 0) { + auto matrix = getRandomMatrix(rows, cols); + global_matrix.resize(rows * cols); + global_results.resize(cols); + + for (int i = 0; i < rows; ++i) { + std::copy(matrix[i].begin(), matrix[i].end(), global_matrix.begin() + i * cols); + } + + taskData->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskData->inputs_count = {static_cast(rows), static_cast(cols)}; + taskData->outputs.emplace_back(reinterpret_cast(global_results.data())); + taskData->outputs_count.push_back(static_cast(global_results.size())); + } + + auto task = std::make_shared(taskData); + ASSERT_EQ(task->validation(), true); + task->pre_processing(); + task->run(); + task->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(task); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(global_results.size(), static_cast(cols)); + } +} + +TEST(naumov_b_min_colum_matrix_mpi, test_column_minimum_task_run) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + + const int rows = 100; + const int cols = 100; + + std::vector> matrix = getRandomMatrix(rows, cols); + + if (world.rank() == 0) { + std::vector flatMatrix(rows * cols); + for (int i = 0; i < rows; ++i) { + std::copy(matrix[i].begin(), matrix[i].end(), flatMatrix.begin() + i * cols); + } + + taskDataPar->inputs.emplace_back(reinterpret_cast(flatMatrix.data())); + taskDataPar->inputs_count = {rows, cols}; + + std::vector output_results(cols); + taskDataPar->outputs.emplace_back(reinterpret_cast(output_results.data())); + taskDataPar->outputs_count = {static_cast(output_results.size())}; + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_EQ(testMpiTaskParallel->validation(), true); + + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); +} diff --git a/tasks/mpi/naumov_b_min_colum_matrix/src/ops_mpi.cpp b/tasks/mpi/naumov_b_min_colum_matrix/src/ops_mpi.cpp new file mode 100644 index 00000000000..7d9d56dcb8f --- /dev/null +++ b/tasks/mpi/naumov_b_min_colum_matrix/src/ops_mpi.cpp @@ -0,0 +1,153 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/naumov_b_min_colum_matrix/include/ops_mpi.hpp" + +#include +#include +#include +#include + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + + input_.resize(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + for (unsigned j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[i * taskData->inputs_count[1] + j]; + } + } + + res.resize(taskData->inputs_count[1], std::numeric_limits::max()); + return true; +} + +bool 
naumov_b_min_colum_matrix_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == taskData->inputs_count[1]; +} + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskSequential::run() { + internal_order_test(); + + size_t numRows = input_.size(); + size_t numCols = input_[0].size(); + + for (size_t j = 0; j < numCols; j++) { + res[j] = input_[0][j]; + for (size_t i = 1; i < numRows; i++) { + res[j] = std::min(res[j], input_[i][j]); + } + } + + return true; +} + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + std::copy(res.begin(), res.end(), reinterpret_cast(taskData->outputs[0])); + return true; +} + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[1] * taskData->inputs_count[0], 0); + auto* temp = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + for (unsigned j = 0; j < taskData->inputs_count[1]; j++) { + input_[i + j * taskData->inputs_count[0]] = temp[j + i * taskData->inputs_count[1]]; + } + } + } + + return true; +} + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if (taskData->inputs.empty() || taskData->outputs.empty()) { + return false; + } + if (taskData->inputs_count.size() < 2) { + return false; + } + if (taskData->inputs_count[0] == 0 || taskData->inputs_count[1] == 0) { + return false; + } + if (taskData->outputs_count[0] != taskData->inputs_count[1]) { + return false; + } + return true; + } + return true; +} +bool naumov_b_min_colum_matrix_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int rows = 0; + int cols = 0; + + if (world.rank() == 0) { + rows = taskData->inputs_count[0]; + cols = taskData->inputs_count[1]; + } + + boost::mpi::broadcast(world, rows, 0); + boost::mpi::broadcast(world, cols, 0); + + int delta = cols / world.size(); + int extra = cols % world.size(); + + boost::mpi::broadcast(world, delta, 0); + boost::mpi::broadcast(world, extra, 0); + + int num_columns = delta + (world.rank() < extra ? 1 : 0); + + if (world.rank() == 0) { + for (int proc = 1; proc < world.size(); ++proc) { + int proc_start_col = proc * delta + std::min(proc, extra); + int proc_num_columns = delta + (proc < extra ? 1 : 0); + world.send(proc, 0, input_.data() + proc_start_col * rows, proc_num_columns * rows); + } + + local_vector_ = std::vector(input_.begin(), input_.begin() + num_columns * rows); + + } else { + local_vector_ = std::vector(num_columns * rows); + world.recv(0, 0, local_vector_.data(), num_columns * rows); + } + + std::vector local_res(num_columns, std::numeric_limits::max()); + + for (int i = 0; i < num_columns; ++i) { + for (int j = 0; j < rows; ++j) { + local_res[i] = std::min(local_res[i], local_vector_[j + i * rows]); + } + } + + if (world.rank() == 0) { + std::vector temp(delta, 0); + res.insert(res.end(), local_res.begin(), local_res.end()); + for (int i = 1; i < world.size(); i++) { + int recv_num_columns = delta + (i < extra ? 
1 : 0); + temp.resize(recv_num_columns); + world.recv(i, 0, temp.data(), recv_num_columns); + res.insert(res.end(), temp.begin(), temp.end()); + } + } else { + world.send(0, 0, local_res.data(), num_columns); + } + + return true; +} + +bool naumov_b_min_colum_matrix_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + std::copy(res.begin(), res.end(), reinterpret_cast(taskData->outputs[0])); + } + + return true; +} diff --git a/tasks/seq/naumov_b_min_colum_matrix/func_tests/main.cpp b/tasks/seq/naumov_b_min_colum_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..e4c5c32c044 --- /dev/null +++ b/tasks/seq/naumov_b_min_colum_matrix/func_tests/main.cpp @@ -0,0 +1,152 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/naumov_b_min_colum_matrix/include/ops_seq.hpp" + +TEST(naumov_b_min_colum_matrix_seq, Test_Min_Column_Values) { + std::vector input_data = {3, 5, 1, 4, 2, 6, 7, 8, 0}; + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs_count = {3, 3}; + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + + int *output_data = new int[3]; + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_data)); + taskDataSeq->outputs_count.emplace_back(3); + + naumov_b_min_colum_matrix_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + EXPECT_EQ(output_data[0], 3); + EXPECT_EQ(output_data[1], 2); + EXPECT_EQ(output_data[2], 0); + + delete[] output_data; +} + +TEST(naumov_b_min_colum_matrix_seq, Test_Equal_Elements) { + std::vector input_data = {5, 5, 5, 5, 5, 5, 5, 5, 5}; + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs_count = {3, 3}; + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + + int *output_data = new int[3]; + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_data)); + taskDataSeq->outputs_count.emplace_back(3); + + naumov_b_min_colum_matrix_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + EXPECT_EQ(output_data[0], 5); + EXPECT_EQ(output_data[1], 5); + EXPECT_EQ(output_data[2], 5); + + delete[] output_data; +} + +TEST(naumov_b_min_colum_matrix_seq, Test_Random_Matrix_5_5) { + const int rows = 5; + const int cols = 5; + std::vector input_data(rows * cols); + + std::generate(input_data.begin(), input_data.end(), []() { return rand() % 100; }); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + + int *output_data = new int[cols]; + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_data)); + taskDataSeq->outputs_count.emplace_back(cols); + + naumov_b_min_colum_matrix_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + for (int j = 0; j < cols; ++j) { + int expected_min = std::numeric_limits::max(); + for (int i = 0; i < rows; ++i) { + expected_min = std::min(expected_min, input_data[i * cols + j]); + } + EXPECT_EQ(output_data[j], expected_min); + } + + delete[] output_data; +} + 
+TEST(naumov_b_min_colum_matrix_seq, Test_Random_Matrix_5_10) { + const int rows = 5; + const int cols = 10; + std::vector input_data(rows * cols); + + std::generate(input_data.begin(), input_data.end(), []() { return rand() % 100; }); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + + int *output_data = new int[cols]; + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_data)); + taskDataSeq->outputs_count.emplace_back(cols); + + naumov_b_min_colum_matrix_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + for (int j = 0; j < cols; ++j) { + int expected_min = std::numeric_limits::max(); + for (int i = 0; i < rows; ++i) { + expected_min = std::min(expected_min, input_data[i * cols + j]); + } + EXPECT_EQ(output_data[j], expected_min); + } + + delete[] output_data; +} + +TEST(naumov_b_min_colum_matrix_seq, Test_Random_Matrix_15_10) { + const int rows = 15; + const int cols = 10; + std::vector input_data(rows * cols); + + std::generate(input_data.begin(), input_data.end(), []() { return rand() % 100; }); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_data.data())); + + int *output_data = new int[cols]; + taskDataSeq->outputs.emplace_back(reinterpret_cast(output_data)); + taskDataSeq->outputs_count.emplace_back(cols); + + naumov_b_min_colum_matrix_seq::TestTaskSequential TestTaskSequential(taskDataSeq); + ASSERT_EQ(TestTaskSequential.validation(), true); + TestTaskSequential.pre_processing(); + TestTaskSequential.run(); + TestTaskSequential.post_processing(); + + for (int j = 0; j < cols; ++j) { + int expected_min = std::numeric_limits::max(); + for (int i = 0; i < rows; ++i) { + expected_min = std::min(expected_min, input_data[i * cols + j]); + } + EXPECT_EQ(output_data[j], expected_min); + } + + delete[] output_data; +} diff --git a/tasks/seq/naumov_b_min_colum_matrix/include/ops_seq.hpp b/tasks/seq/naumov_b_min_colum_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..140d1f7f6c4 --- /dev/null +++ b/tasks/seq/naumov_b_min_colum_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace naumov_b_min_colum_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; +}; + +} // namespace naumov_b_min_colum_matrix_seq \ No newline at end of file diff --git a/tasks/seq/naumov_b_min_colum_matrix/perf_tests/main.cpp b/tasks/seq/naumov_b_min_colum_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..683c5dbd3b2 --- /dev/null +++ b/tasks/seq/naumov_b_min_colum_matrix/perf_tests/main.cpp @@ -0,0 +1,83 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/naumov_b_min_colum_matrix/include/ops_seq.hpp" + +TEST(sequential_naumov_b_min_colum_matrix_perf_test, test_pipeline_run) { + const int rows = 1000; + const int cols = 
1000; + + std::vector in(rows * cols); + std::generate(in.begin(), in.end(), []() { return rand() % 100; }); + + std::vector out(cols, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {cols}; + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(out.size(), static_cast(cols)); +} + +TEST(sequential_naumov_b_min_colum_matrix_perf_test, test_task_run) { + const int rows = 1000; + const int cols = 1000; + + std::vector in(rows * cols); + std::generate(in.begin(), in.end(), []() { return rand() % 100; }); + + std::vector out(cols, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count = {rows, cols}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count = {cols}; + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; // Convert to seconds + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(out.size(), static_cast(cols)); +} diff --git a/tasks/seq/naumov_b_min_colum_matrix/src/ops_seq.cpp b/tasks/seq/naumov_b_min_colum_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..d70226b82b1 --- /dev/null +++ b/tasks/seq/naumov_b_min_colum_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/naumov_b_min_colum_matrix/include/ops_seq.hpp" + +bool naumov_b_min_colum_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int st = taskData->inputs_count[0]; + int cl = taskData->inputs_count[1]; + + input_.resize(st, std::vector(cl)); + + int* input_data = reinterpret_cast(taskData->inputs[0]); + for (int i = 0; i < st; ++i) { + for (int j = 0; j < cl; ++j) { + input_[i][j] = input_data[i * cl + j]; + } + } + res.resize(cl); + + return true; +} + +bool naumov_b_min_colum_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return (taskData->inputs_count.size() >= 2) && (!taskData->inputs.empty()) && (!taskData->outputs.empty()); +} + +bool naumov_b_min_colum_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + size_t numRows = input_.size(); + size_t numCols = input_[0].size(); + + for (size_t j = 0; j < numCols; j++) { + res[j] = 
+    for (size_t i = 1; i < numRows; i++) {
+      if (input_[i][j] < res[j]) {
+        res[j] = input_[i][j];
+      }
+    }
+  }
+
+  return true;
+}
+
+bool naumov_b_min_colum_matrix_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+
+  int* output_ = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (size_t i = 0; i < res.size(); i++) {
+    output_[i] = res[i];
+  }
+  return true;
+}

From f8603622664272dd0553007ce0b1219cec49c74b Mon Sep 17 00:00:00 2001
From: Dmitriy Durynichev <129729757+ofuuse@users.noreply.github.com>
Date: Sat, 9 Nov 2024 06:05:31 +0300
Subject: [PATCH 155/155] =?UTF-8?q?=D0=94=D1=83=D1=80=D1=8B=D0=BD=D0=B8?=
 =?UTF-8?q?=D1=87=D0=B5=D0=B2=20=D0=94=D0=BC=D0=B8=D1=82=D1=80=D0=B8=D0=B9?=
 =?UTF-8?q?.=20=D0=97=D0=B0=D0=B4=D0=B0=D1=87=D0=B0=201.=20=D0=92=D0=B0?=
 =?UTF-8?q?=D1=80=D0=B8=D0=B0=D0=BD=D1=82=208.=20=D0=9D=D0=B0=D1=85=D0=BE?=
 =?UTF-8?q?=D0=B6=D0=B4=D0=B5=D0=BD=D0=B8=D0=B5=20=D0=BD=D0=B0=D0=B8=D0=B1?=
 =?UTF-8?q?=D0=BE=D0=BB=D0=B5=D0=B5=20=D0=BE=D1=82=D0=BB=D0=B8=D1=87=D0=B0?=
 =?UTF-8?q?=D1=8E=D1=89=D0=B8=D1=85=D1=81=D1=8F=20=D0=BF=D0=BE=20=D0=B7?=
 =?UTF-8?q?=D0=BD=D0=B0=D1=87=D0=B5=D0=BD=D0=B8=D1=8E=20=D1=81=D0=BE=D1=81?=
 =?UTF-8?q?=D0=B5=D0=B4=D0=BD=D0=B8=D1=85=20=D1=8D=D0=BB=D0=B5=D0=BC=D0=B5?=
 =?UTF-8?q?=D0=BD=D1=82=D0=BE=D0=B2=20=D0=B2=D0=B5=D0=BA=D1=82=D0=BE=D1=80?=
 =?UTF-8?q?=D0=B0.=20(#249)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Algorithm description

### Sequential program:
1. Initialization and validation: check that the input array contains at
least two elements (otherwise there is no adjacent difference to compute)
and that the output buffer has room for the two result elements.
2. Pre-processing: load the array of integers from the taskData structure
into the vector input, and initialize the two-element vector result that
will hold the pair with the largest difference.
3. Computing the maximum difference: initialize maxDiff and the best pair,
then walk input starting from the second element; on each iteration compute
the absolute difference diff between the current element and the previous
one. Whenever diff exceeds maxDiff, update maxDiff and store the current
pair in result, thereby tracking the adjacent pair with the largest
difference (see the sketch after this list).
4. Post-processing: write the result (the two elements with the maximum
difference) into the output array taskData->outputs.
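Below is a minimal, self-contained sketch of this sequential scan, for
illustration only; it is not part of the patch, and the helper name
max_adjacent_pair and the sample vector are invented:

```cpp
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <utility>
#include <vector>

// Returns the adjacent pair with the largest absolute difference, as (min, max).
// Assumes v.size() >= 2, which mirrors the validation step described above.
std::pair<int, int> max_adjacent_pair(const std::vector<int>& v) {
  std::pair<int, int> best{std::min(v[0], v[1]), std::max(v[0], v[1])};
  int max_diff = std::abs(v[1] - v[0]);
  for (size_t i = 2; i < v.size(); ++i) {
    int diff = std::abs(v[i] - v[i - 1]);
    if (diff > max_diff) {
      max_diff = diff;
      best = {std::min(v[i - 1], v[i]), std::max(v[i - 1], v[i])};
    }
  }
  return best;
}

int main() {
  std::vector<int> v{1, 5, 2, 10, 3};
  auto [lo, hi] = max_adjacent_pair(v);
  std::cout << lo << ' ' << hi << '\n';  // prints "2 10" (|2 - 10| = 8 is the largest gap)
  return 0;
}
```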
### Parallel program:
1. Initialization and validation: check that the input array contains at
least two elements and that the output array can hold the two result
elements. Only the process with rank 0 performs this check.
2. Pre-processing (distributing the data across processes): the rank-0
process loads the vector input from the taskData structure. The sub-array
size (chunk_size) each process will handle is computed from the total
number of processes and the size of input. Rank 0 sends each process its
part of the vector with boost::mpi::send, together with the offset
(starting index) of that part in the original vector; neighboring chunks
overlap by one element so that no adjacent pair is lost at a chunk
boundary.
3. Local computation in each process: every process receives its chunk of
data (chunk) and its starting index chunkStart. Each process finds, within
its sub-array, the adjacent pair with the largest difference, storing the
answer in a chunk_result structure that holds the indices of the pair and
the difference between them.
4. Gathering the per-process results and computing the global answer:
boost::mpi::reduce combines the chunk results; a custom
maximum-by-difference operator (the ChunkResult functor, not the built-in
MPI_MAX) selects the pair with the largest difference among all sub-arrays,
and the final value is stored in result on the rank-0 process (see the
sketch after this list).
5. Post-processing: rank 0 writes the final result (the two elements with
the maximum difference) into the output array taskData->outputs.
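And a matching standalone sketch of the parallel scheme, one-element-overlap
chunking plus a custom reduction, for illustration only; PairMax and
TakeLarger are invented names, Boost.MPI is assumed to be available, and the
sketch assumes the input is comfortably larger than the process count:

```cpp
#include <boost/mpi.hpp>

#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <vector>

// Local result: the best adjacent pair found in one chunk, kept as (diff, lo, hi).
struct PairMax {
  int diff = -1;  // sentinel so an undersized chunk always loses the reduction
  int lo = 0;
  int hi = 0;
  template <class Archive>
  void serialize(Archive& ar, const unsigned int) {
    ar & diff;
    ar & lo;
    ar & hi;
  }
};

// Reduction operator: keep the candidate with the larger difference.
struct TakeLarger {
  PairMax operator()(const PairMax& a, const PairMax& b) const { return a.diff >= b.diff ? a : b; }
};

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> chunk;
  if (world.rank() == 0) {
    std::vector<int> input{5368, 925, 3210, 500, 4893, 90, 6589, 5367};
    size_t chunk_size = input.size() / world.size();
    // Each chunk overlaps its right neighbor by one element so the pair that
    // straddles a chunk boundary is still examined by exactly one process.
    for (int proc = 1; proc < world.size(); ++proc) {
      size_t start = proc * chunk_size;
      size_t end = (proc == world.size() - 1) ? input.size() : start + chunk_size + 1;
      world.send(proc, 0, std::vector<int>(input.begin() + start, input.begin() + end));
    }
    chunk.assign(input.begin(), input.begin() + chunk_size + (world.size() > 1 ? 1 : 0));
  } else {
    world.recv(0, 0, chunk);
  }

  // Local scan over this process's chunk.
  PairMax local;
  for (size_t i = 1; i < chunk.size(); ++i) {
    int diff = std::abs(chunk[i] - chunk[i - 1]);
    if (diff > local.diff) {
      local = {diff, std::min(chunk[i - 1], chunk[i]), std::max(chunk[i - 1], chunk[i])};
    }
  }

  // Custom reduction picks the global winner on rank 0.
  PairMax global;
  boost::mpi::reduce(world, local, global, TakeLarger{}, 0);
  if (world.rank() == 0) {
    std::cout << global.lo << ' ' << global.hi << '\n';  // prints "90 6589"
  }
  return 0;
}
```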
---
 .../func_tests/main.cpp                       | 349 ++++++++++++++++++
 .../include/ops_mpi.hpp                       |  71 ++++
 .../perf_tests/main.cpp                       |  80 ++++
 .../src/ops_mpi.cpp                           |  93 +++++
 .../func_tests/main.cpp                       | 103 ++++++
 .../include/ops_seq.hpp                       |  22 ++
 .../perf_tests/main.cpp                       |  78 ++++
 .../src/ops_seq.cpp                           |  38 ++
 8 files changed, 834 insertions(+)
 create mode 100644 tasks/mpi/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp
 create mode 100644 tasks/mpi/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/mpi/durynichev_d_most_different_neighbor_elements/src/ops_mpi.cpp
 create mode 100644 tasks/seq/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
 create mode 100644 tasks/seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp
 create mode 100644 tasks/seq/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
 create mode 100644 tasks/seq/durynichev_d_most_different_neighbor_elements/src/ops_seq.cpp

diff --git a/tasks/mpi/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..e1a6df3e0a5
--- /dev/null
+++ b/tasks/mpi/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
@@ -0,0 +1,349 @@
+#include <gtest/gtest.h>
+
+#include <random>
+#include <vector>
+
+#include "mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp"
+namespace durynichev_d_most_different_neighbor_elements_mpi {
+
+std::vector<int> getRandomVector(size_t size) {
+  auto device = std::random_device();
+  auto generator = std::mt19937(device());
+  auto distribution = std::uniform_int_distribution<int>(0, 100'000);
+  auto vector = std::vector<int>(size);
+  for (auto &val : vector) {
+    val = distribution(generator);
+  }
+  return vector;
+}
+}  // namespace durynichev_d_most_different_neighbor_elements_mpi
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, default_vector) {
+  boost::mpi::communicator world;
+  std::vector<int> input;
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    input = durynichev_d_most_different_neighbor_elements_mpi::getRandomVector(20'000);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, huge_vector) {
+  boost::mpi::communicator world;
+  std::vector<int> input;
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    input = durynichev_d_most_different_neighbor_elements_mpi::getRandomVector(200'000);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, zero_elements) {
+  boost::mpi::communicator world;
+  std::vector<int> input(10'000, 0);
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, fixed_values_vector) {
+  boost::mpi::communicator world;
+  std::vector<int> input = {5368, 925, 3210, 500, 4893, 90, 6589, 5367};
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, two_elements) {
+  boost::mpi::communicator world;
+  std::vector<int> input = {0, 100000};
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, all_elements_equal) {
+  boost::mpi::communicator world;
+  std::vector<int> input(100, 42);
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, alternating_max_min) {
+  boost::mpi::communicator world;
+  std::vector<int> input = {100000, 0, 100000, 0, 100000, 0};
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, monotonically_increasing) {
+  boost::mpi::communicator world;
+  std::vector<int> input = {1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, repeating_groups) {
+  boost::mpi::communicator world;
+  std::vector<int> input = {10, 10, 20, 20, 10, 10, 20, 20};
+
+  std::vector<int> outputPar{0, 0};
+  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataPar->inputs_count.emplace_back(input.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputPar.data()));
+    taskDataPar->outputs_count.emplace_back(outputPar.size());
+  }
+
+  durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_TRUE(testMpiTaskParallel.validation());
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<int> outputSeq{0, 0};
+    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+    taskDataSeq->inputs_count.emplace_back(input.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(outputSeq.data()));
+    taskDataSeq->outputs_count.emplace_back(outputSeq.size());
+
+    durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_TRUE(testMpiTaskSequential.validation());
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(outputSeq, outputPar);
+  }
+}
diff --git a/tasks/mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp b/tasks/mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp
new file mode 100644
index 00000000000..957ce7b85e7
--- /dev/null
+++ b/tasks/mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp
@@ -0,0 +1,71 @@
+#pragma once
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/serialization/serialization.hpp>
+
+#include "core/task/include/task.hpp"
+namespace durynichev_d_most_different_neighbor_elements_mpi {
+
+struct ChunkResult {
+  size_t left_index;
+  size_t right_index;
+  int diff;
+
+  template <typename Archive>
+  void serialize(Archive &archive, const unsigned int version) {
+    archive & left_index;
+    archive & right_index;
+    archive & diff;
+  }
+
+  // Reduction operator: keep the larger difference; break ties toward the leftmost pair.
+  ChunkResult operator()(const ChunkResult &a, const ChunkResult &b) {
+    return (a.diff > b.diff || (a.diff == b.diff && (a.left_index < b.left_index))) ? a : b;
+  }
+
+  // Maps the stored indices back to the (min, max) pair of values in the full input.
+  std::vector<int> toVector(const std::vector<int> &input) const {
+    return std::vector<int>{
+        std::min(input[left_index], input[right_index]),
+        std::max(input[left_index], input[right_index]),
+    };
+  }
+};
+
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool validation() override;
+  bool pre_processing() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input, result;
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool validation() override;
+  bool pre_processing() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  boost::mpi::communicator world;
+  std::vector<int> input, chunk;
+  int chunkStart = 0;
+  ChunkResult result{};
+};
+
+}  // namespace durynichev_d_most_different_neighbor_elements_mpi
diff --git a/tasks/mpi/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..708eca9267e
--- /dev/null
+++ b/tasks/mpi/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
@@ -0,0 +1,80 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp"
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, test_pipeline_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec(20000000, 0);
+  std::vector<int> global_diff(2, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_diff.data()));
+    taskDataPar->outputs_count.emplace_back(global_diff.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(0, global_diff[0]);
+  }
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_mpi, test_task_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec(20000000, 0);
+  std::vector<int> global_sum(2, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(0, global_sum[0]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/durynichev_d_most_different_neighbor_elements/src/ops_mpi.cpp b/tasks/mpi/durynichev_d_most_different_neighbor_elements/src/ops_mpi.cpp
new file mode 100644
index 00000000000..cafd593c890
--- /dev/null
+++ b/tasks/mpi/durynichev_d_most_different_neighbor_elements/src/ops_mpi.cpp
@@ -0,0 +1,93 @@
+#include "mpi/durynichev_d_most_different_neighbor_elements/include/ops_mpi.hpp"
+
+#include <cstdlib>
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 2;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  auto *input_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  auto input_size = taskData->inputs_count[0];
+  input.assign(input_ptr, input_ptr + input_size);
+  result.resize(2);
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  result[0] = input[0];
+  result[1] = input[1];
+  int maxDiff = 0;
+
+  for (size_t i = 1; i < input.size(); ++i) {
+    int diff = std::abs(input[i] - input[i - 1]);
+    if (diff > maxDiff) {
+      maxDiff = diff;
+      result[0] = std::min(input[i], input[i - 1]);
+      result[1] = std::max(input[i], input[i - 1]);
+    }
+  }
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  std::copy_n(result.begin(), 2, reinterpret_cast<int *>(taskData->outputs[0]));
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 2;
+  }
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    auto input_size = taskData->inputs_count[0];
+    auto chunk_size = input_size / world.size();
+    auto *input_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+    input.assign(input_ptr, input_ptr + input_size);
+    // Rank 0 keeps the first chunk; chunks overlap by one element so the pair
+    // straddling each chunk boundary is still examined.
+    chunk.assign(input_ptr, input_ptr + chunk_size + int(world.size() > 1));
+
+    for (int proc = 1; proc < world.size(); proc++) {
+      auto start = proc * chunk_size;
+      auto size = (proc == world.size() - 1) ? input_size - start : chunk_size + 1;
+      world.send(proc, 0, std::vector<int>(input_ptr + start, input_ptr + start + size));
+      // Send the offset as int to match the int chunkStart received below.
+      world.send(proc, 1, static_cast<int>(start));
+    }
+
+  } else {
+    world.recv(0, 0, chunk);
+    world.recv(0, 1, chunkStart);
+  }
+
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  // Seed with this chunk's first pair; indices are global, i.e. offset by chunkStart.
+  auto chunk_result =
+      ChunkResult{static_cast<size_t>(chunkStart), static_cast<size_t>(chunkStart) + 1, std::abs(chunk[0] - chunk[1])};
+  for (size_t i = 2; i < chunk.size(); ++i) {
+    int diff = std::abs(chunk[i] - chunk[i - 1]);
+    if (diff > chunk_result.diff) {
+      chunk_result = ChunkResult{i - 1 + chunkStart, i + chunkStart, diff};
+    }
+  }
+  boost::mpi::reduce(world, chunk_result, result, ChunkResult(), 0);
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    std::copy_n(result.toVector(input).begin(), 2, reinterpret_cast<int *>(taskData->outputs[0]));
+  }
+  return true;
+}
diff --git a/tasks/seq/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..adf186fa1b6
--- /dev/null
+++ b/tasks/seq/durynichev_d_most_different_neighbor_elements/func_tests/main.cpp
@@ -0,0 +1,103 @@
+#include <gtest/gtest.h>
+
+#include "seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp"
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, functionality_check) {
+  // Create data
+  std::vector<int> in{1, 5, 2, 10, 3};
+  std::vector<int> out{0, 0};
+  std::vector<int> want{2, 10};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_TRUE(testTaskSequential.validation());
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(want, out);
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, all_elements_are_equal) {
+  // Create data
+  std::vector<int> in{1, 1, 1, 1, 1, 1};
+  std::vector<int> out{0, 0};
+  std::vector<int> want{1, 1};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_TRUE(testTaskSequential.validation());
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(want, out);
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, check_double_number) {
+  // Create data
+  std::vector<int> in{-10, 10};
+  std::vector<int> out{0, 0};
+  std::vector<int> want{-10, 10};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_TRUE(testTaskSequential.validation());
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(want, out);
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, check_one_number) {
+  // Create data
+  std::vector<int> in{1};
+  std::vector<int> out{0, 0};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_FALSE(testTaskSequential.validation());
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, check_validate_func) {
+  // Create data
+  std::vector<int> in{1, 5, 2, 10, 3};
+  std::vector<int> out{0};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_FALSE(testTaskSequential.validation());
+}
\ No newline at end of file
diff --git a/tasks/seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp b/tasks/seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp
new file mode 100644
index 00000000000..c4b32f66ad0
--- /dev/null
+++ b/tasks/seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace durynichev_d_most_different_neighbor_elements_seq {
+
+class TestTaskSequential : public ppc::core::Task {
+ public:
+  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool validation() override;
+  bool pre_processing() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input, result;
+};
+
+}  // namespace durynichev_d_most_different_neighbor_elements_seq
\ No newline at end of file
diff --git a/tasks/seq/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..5635e5ebbd9
--- /dev/null
+++ b/tasks/seq/durynichev_d_most_different_neighbor_elements/perf_tests/main.cpp
@@ -0,0 +1,78 @@
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp"
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, test_pipeline_run) {
+  // Create data
+  std::vector<int> in(10'000'000, 1);
+  std::vector<int> out{0, 0};
+  std::vector<int> want{1, 1};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(want, out);
+}
+
+TEST(durynichev_d_most_different_neighbor_elements_seq, test_task_run) {
+  // Create data
+  std::vector<int> in(10'000'000, 1);
+  std::vector<int> out{0, 0};
+  std::vector<int> want{1, 1};
+
+  // Create TaskData
+  auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(want, out);
+}
diff --git a/tasks/seq/durynichev_d_most_different_neighbor_elements/src/ops_seq.cpp b/tasks/seq/durynichev_d_most_different_neighbor_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..b6d5bb210c8
--- /dev/null
+++ b/tasks/seq/durynichev_d_most_different_neighbor_elements/src/ops_seq.cpp
@@ -0,0 +1,38 @@
+#include "seq/durynichev_d_most_different_neighbor_elements/include/ops_seq.hpp"
+
+#include <algorithm>
+#include <cstdlib>
+
+bool durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 2;
+}
+
+bool durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  auto *input_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  auto input_size = taskData->inputs_count[0];
+  input.assign(input_ptr, input_ptr + input_size);
+  result.resize(2);
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential::run() {
+  internal_order_test();
+  // Seed with the first adjacent pair, normalized to (min, max) like every later update.
+  result[0] = std::min(input[0], input[1]);
+  result[1] = std::max(input[0], input[1]);
+  int maxDiff = std::abs(input[0] - input[1]);
+
+  for (size_t i = 2; i < input.size(); ++i) {
+    int diff = std::abs(input[i] - input[i - 1]);
+    if (diff > maxDiff) {
+      maxDiff = diff;
+      result[0] = std::min(input[i], input[i - 1]);
+      result[1] = std::max(input[i], input[i - 1]);
+    }
+  }
+  return true;
+}
+
+bool durynichev_d_most_different_neighbor_elements_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  std::copy_n(result.begin(), 2, reinterpret_cast<int *>(taskData->outputs[0]));
+  return true;
+}
\ No newline at end of file