From ad41d0f52f94dfd326c5a30958ce7d19a20c45b4 Mon Sep 17 00:00:00 2001
From: yanlan song
Date: Sat, 7 Oct 2023 18:44:25 +0800
Subject: [PATCH] rework auto test cases (#19862)

* initial commit
Signed-off-by: fishbell
* clean up
Signed-off-by: fishbell
* fix windows build failure
Signed-off-by: fishbell
* enable auto func tests
Signed-off-by: fishbell
* enable auto_func_test to ci
Signed-off-by: fishbell
* some clean up in gpu case
Signed-off-by: fishbell
* clang
Signed-off-by: fishbell
* fix build warning
Signed-off-by: fishbell
* enable new tests
Signed-off-by: fishbell
* fix build warning
Signed-off-by: fishbell
* enable consistency test
Signed-off-by: fishbell
* try fix build error on manylinux
Signed-off-by: fishbell
* enable cpplint
Signed-off-by: fishbell
* enable clang-format
Signed-off-by: fishbell
enable some tests
Signed-off-by: fishbell
* fix typo
Signed-off-by: fishbell
* clang for unit tests
Signed-off-by: fishbell
* fix merge conflict
Signed-off-by: fishbell
---------
Signed-off-by: fishbell
---
 .ci/azure/linux.yml | 3 +
 .ci/azure/windows.yml | 3 +
 .github/workflows/linux.yml | 6 +
 .github/workflows/windows.yml | 5 +
 src/inference/src/infer_request.cpp | 2 +
 src/plugins/auto/src/plugin.cpp | 5 +-
 src/plugins/auto/tests/CMakeLists.txt | 1 +
 .../auto/tests/functional/CMakeLists.txt | 34 +
 ...sync_compiled_for_multiple_device_test.cpp | 97 +++
 .../functional/behavior/auto_func_test.cpp | 790 ++++++++++++++++++
 .../functional/behavior/auto_func_test.hpp | 133 +++
 .../functional/behavior/caching_test.cpp | 60 ++
 .../functional/behavior/callback_test.cpp | 116 +++
 .../behavior/infer_consistency_test.cpp | 25 +
 .../behavior/infer_consistency_test.hpp | 105 +++
 .../behavior/infer_multi_threading_tests.cpp | 114 +++
 .../tests/functional/behavior/io_tensor.cpp | 172 ++++
 .../tests/functional/behavior/io_tensor.hpp | 51 ++
 .../behavior/life_time_batch_enabled_test.cpp | 57 ++
 .../functional/behavior/property_test.cpp | 99 +++
 .../behavior/remote_tensor_test.cpp | 104 +++
 .../tests/functional/behavior/wait_test.cpp | 73 ++
 .../executable_network/exec_network_base.cpp | 44 +
 .../executable_network/get_metric.cpp | 36 +
 .../behavior/infer_request/callback.cpp | 23 +
 .../behavior/infer_request/io_blob.cpp | 29 +
 .../behavior/infer_request/memory_states.cpp | 20 +-
 .../behavior/infer_request/multitheading.cpp | 27 +
 .../behavior/infer_request/perf_counters.cpp | 24 +
 .../infer_request/set_blob_by_type.cpp | 33 +
 .../behavior/infer_request/wait.cpp | 28 +
 .../core_integration.cpp | 30 +
 .../exec_network_base.cpp | 35 +
 .../ov_exec_net_import_export.cpp | 32 +
 .../ov_executable_network/properties.cpp | 144 ++++
 .../behavior/ov_infer_request/callback.cpp | 25 +
 .../infer_request_dynamic.cpp | 47 ++
 .../ov_infer_request/inference_chaining.cpp | 25 +
 .../behavior/ov_infer_request/io_tensor.cpp | 74 ++
 .../ov_infer_request/multithreading.cpp | 26 +
 .../ov_infer_request/perf_counters.cpp | 26 +
 .../behavior/ov_infer_request/wait.cpp | 26 +
 .../behavior/ov_plugin/caching_tests.cpp | 47 ++
 .../behavior/ov_plugin/core_integration.cpp | 60 ++
 .../behavior/ov_plugin/life_time.cpp | 22 +
 .../behavior/ov_plugin/properties_tests.cpp | 165 ++++
 .../behavior/plugin/configuration_tests.cpp | 191 +++++
 .../behavior/plugin/core_integration.cpp | 46 +
 .../behavior/plugin/core_threading_tests.cpp | 37 +
 .../behavior/plugin/set_preprocess.cpp | 84 ++
 .../behavior/plugin/version.cpp | 18 +
 .../shared_tests_instances/core_config.cpp | 17 +
 .../set_device_name.cpp | 17 +
 .../skip_tests_config.cpp | 80 ++
 .../auto/tests/unit/auto_unit_test.cpp | 321 +++---
 .../tests/unit/compile_model_metric_test.cpp | 268 +++---
 .../unit/compile_model_property_test.cpp | 132 +--
 src/plugins/auto/tests/unit/ctput_test.cpp | 63 +-
 .../tests/unit/default_perf_hint_test.cpp | 173 ++--
 .../auto/tests/unit/dynamic_output_test.cpp | 71 +-
 .../auto/tests/unit/get_device_list.cpp | 131 +--
 .../tests/unit/include/auto_unit_test.hpp | 121 ++-
 .../auto/tests/unit/include/gmock_plugin.hpp | 53 +-
 .../auto/tests/unit/include/mock_common.hpp | 145 ----
 .../tests/unit/include/mock_log_utils.hpp | 10 +-
 .../tests/unit/key_network_priority_test.cpp | 391 +++++----
 .../auto/tests/unit/life_time_test.cpp | 84 ++
 .../auto/tests/unit/log_utils_format_test.cpp | 134 +--
 .../auto/tests/unit/log_utils_test.cpp | 95 +--
 src/plugins/auto/tests/unit/mock_common.cpp | 62 --
 .../tests/unit/parse_meta_device_test.cpp | 72 +-
 src/plugins/auto/tests/unit/property_test.cpp | 100 ---
 .../auto/tests/unit/release_helper_test.cpp | 87 +-
 .../auto/tests/unit/runtime_fallback_test.cpp | 228 +++--
 .../tests/unit/select_device_failed_test.cpp | 120 +--
 .../auto/tests/unit/select_device_test.cpp | 102 ++-
 .../auto/tests/unit/set_log_level_test.cpp | 19 +-
 .../unit/startup_fallback_property_test.cpp | 47 +-
 .../auto_batch/src/sync_infer_request.cpp | 8 +-
 .../executable_network/exec_network_base.cpp | 39 -
 .../executable_network/get_metric.cpp | 10 +-
 .../behavior/infer_request/callback.cpp | 16 -
 .../behavior/infer_request/config.cpp | 35 -
 .../behavior/infer_request/io_blob.cpp | 21 -
 .../behavior/infer_request/memory_states.cpp | 22 -
 .../behavior/infer_request/multitheading.cpp | 17 -
 .../behavior/infer_request/perf_counters.cpp | 21 -
 .../infer_request/set_blob_by_type.cpp | 15 -
 .../behavior/infer_request/wait.cpp | 21 -
 .../core_integration.cpp | 4 +-
 .../exec_network_base.cpp | 37 -
 .../ov_exec_net_import_export.cpp | 10 -
 .../ov_executable_network/properties.cpp | 117 +--
 .../behavior/ov_infer_request/callback.cpp | 16 -
 .../ov_infer_request/infer_consistency.cpp | 31 -
 .../infer_request_dynamic.cpp | 15 -
 .../ov_infer_request/inference_chaining.cpp | 16 -
 .../behavior/ov_infer_request/io_tensor.cpp | 48 --
 .../ov_infer_request/iteration_chaining.cpp | 11 -
 .../ov_infer_request/multithreading.cpp | 17 -
 .../ov_infer_request/perf_counters.cpp | 20 -
 .../behavior/ov_infer_request/wait.cpp | 21 -
 .../behavior/ov_plugin/caching_tests.cpp | 47 --
 .../behavior/ov_plugin/core_integration.cpp | 21 -
 .../behavior/ov_plugin/life_time.cpp | 4 +-
 .../behavior/ov_plugin/properties_tests.cpp | 125 +--
 .../behavior/plugin/configuration_tests.cpp | 150 ----
 .../behavior/plugin/core_integration.cpp | 12 +-
 .../behavior/plugin/core_threading_tests.cpp | 7 -
 .../behavior/plugin/set_preprocess.cpp | 76 --
 .../behavior/plugin/version.cpp | 8 -
 .../multi/cpu_remote_blob_tests.cpp | 15 -
 .../skip_tests_config.cpp | 19 +-
 .../intel_gpu/tests/functional/CMakeLists.txt | 11 -
 .../executable_network/exec_net_base.cpp | 28 -
 .../executable_network/get_metric.cpp | 10 +-
 .../behavior/infer_request/callback.cpp | 24 -
 .../behavior/infer_request/config.cpp | 12 -
 .../behavior/infer_request/io_blob.cpp | 25 -
 .../behavior/infer_request/multithreading.cpp | 25 -
 .../behavior/infer_request/perf_counters.cpp | 26 -
 .../infer_request/set_blob_by_type.cpp | 16 +-
 .../behavior/infer_request/wait.cpp | 24 -
 .../ov_executable_network/exec_net_base.cpp | 14 -
 .../ov_executable_network/get_metric.cpp | 137 +--
 .../behavior/ov_infer_request/callback.cpp | 16 -
 .../ov_infer_request/infer_consistency.cpp | 61 --
 .../infer_request_dynamic.cpp | 64 --
 .../behavior/ov_infer_request/io_tensor.cpp | 34 -
 .../ov_infer_request/multithreading.cpp | 16 -
 .../ov_infer_request/perf_counters.cpp | 50 --
 .../behavior/ov_infer_request/wait.cpp | 20 -
 .../behavior/ov_plugin/caching_tests.cpp | 38 -
 .../behavior/ov_plugin/life_time.cpp | 18 +-
 .../behavior/ov_plugin/properties_tests.cpp | 105 +--
 .../behavior/ov_plugin/remote.cpp | 12 -
 .../behavior/plugin/caching_tests.cpp | 21 -
 .../behavior/plugin/configuration_tests.cpp | 112 ---
 .../behavior/plugin/core_integration.cpp | 10 +-
 .../behavior/plugin/set_preprocess.cpp | 26 -
 .../behavior/plugin/version.cpp | 8 -
 .../multi/gpu_remote_blob_tests.cpp | 154 ----
 .../skip_tests_config.cpp | 14 +-
 143 files changed, 5065 insertions(+), 3760 deletions(-)
 create mode 100644 src/plugins/auto/tests/functional/CMakeLists.txt
 create mode 100644 src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/auto_func_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/auto_func_test.hpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/caching_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/callback_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/io_tensor.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/io_tensor.hpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/property_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/behavior/wait_test.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp
 rename src/plugins/{intel_gpu => auto}/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp (53%)
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp
 create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp
 create mode
100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp create mode 100644 src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp delete mode 100644 src/plugins/auto/tests/unit/include/mock_common.hpp create mode 100644 src/plugins/auto/tests/unit/life_time_test.cpp delete mode 100644 src/plugins/auto/tests/unit/mock_common.cpp delete mode 100644 src/plugins/auto/tests/unit/property_test.cpp delete mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp delete mode 100644 src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index a4d710bafc2112..8626f9d609ed0e 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -411,6 +411,9 @@ jobs: - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml displayName: 'AUTO UT' + - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_func_tests.xml + displayName: 'AUTO FuncTests' + - script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_unit_tests.xml displayName: 'AutoBatch UT' diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index cbff7caa7533cd..a36238fca4a874 100644 --- a/.ci/azure/windows.yml +++ 
b/.ci/azure/windows.yml @@ -305,6 +305,9 @@ jobs: - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_unit_tests.xml displayName: 'AUTO UT' + - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_func_tests.xml + displayName: 'AUTO FuncTests' + - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_unit_tests.xml displayName: 'AutoBatch UT' diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 010d5fdb4b8411..16affeb9bfa01c 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -733,6 +733,12 @@ jobs: ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml + - name: AUTO func Tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + - name: Template plugin func tests run: | source ${INSTALL_DIR}/setupvars.sh diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e4b2f912d23a94..e8b539c7a1d49d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -595,6 +595,11 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml + - name: AUTO FuncTests + shell: cmd + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + - name: Template plugin tests shell: cmd run: | diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/infer_request.cpp index 18b97cdf7b7eb8..1023479546d1a8 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/infer_request.cpp @@ -30,6 +30,8 @@ OPENVINO_SUPPRESS_DEPRECATED_START __VA_ARGS__; \ } catch (const ::InferenceEngine::RequestBusy& ex) { \ ov::Busy::create(ex.what()); \ + } catch (const ov::Busy&) { \ + throw; \ } catch (const std::exception& ex) { \ OPENVINO_THROW(ex.what()); \ } catch (...) 
{ \ diff --git a/src/plugins/auto/src/plugin.cpp b/src/plugins/auto/src/plugin.cpp index aea9af9a4c79b6..8e5131b7a3c1bb 100644 --- a/src/plugins/auto/src/plugin.cpp +++ b/src/plugins/auto/src/plugin.cpp @@ -415,8 +415,9 @@ std::shared_ptr Plugin::compile_model_impl(const std::string load_config.set_user_property(pre_process_config(properties)); load_config.apply_user_properties(); if (!work_mode_auto) { - if (iter_config != properties.end() && iter_config->second != "THROUGHPUT") { - LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only", iter_config->second.as().c_str()); + if (iter_config != properties.end() && iter_config->second.as() != "THROUGHPUT") { + LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only", + iter_config->second.as().c_str()); } load_config.set_property(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); } diff --git a/src/plugins/auto/tests/CMakeLists.txt b/src/plugins/auto/tests/CMakeLists.txt index c9273285747381..bce0f68667ca23 100644 --- a/src/plugins/auto/tests/CMakeLists.txt +++ b/src/plugins/auto/tests/CMakeLists.txt @@ -8,4 +8,5 @@ if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) endif() +add_subdirectory(functional) add_subdirectory(unit) \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/CMakeLists.txt b/src/plugins/auto/tests/functional/CMakeLists.txt new file mode 100644 index 00000000000000..44bef91f8fa1d9 --- /dev/null +++ b/src/plugins/auto/tests/functional/CMakeLists.txt @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME ov_auto_func_tests) + +if(ENABLE_AUTO_BATCH) + list(APPEND DEPENDENCIES openvino_auto_batch_plugin) + list(APPEND COMPILE_DEFINITIONS ENABLE_AUTO_BATCH) +endif() + +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + ie_add_compiler_flags(/wd4305) +endif() + +ov_add_test_target( + NAME ${TARGET_NAME} + ROOT ${CMAKE_CURRENT_SOURCE_DIR} + LINK_LIBRARIES + openvino::runtime::dev + gtest + gtest_main + openvino::funcSharedTests + INCLUDES + ${CMAKE_CURRENT_SOURCE_DIR} + ${TEST_COMMON_INCLUDE_DIR} + ADD_CLANG_FORMAT + LABELS + Multi + Auto +) + +target_compile_definitions(${TARGET_NAME} PRIVATE ${COMPILE_DEFINITIONS}) +set_ie_threading_interface_for(${TARGET_NAME}) \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp new file mode 100644 index 00000000000000..bfd26cc6ffd260 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/async_compiled_for_multiple_device_test.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#ifdef __GLIBC__ +# include +# if __GLIBC_MINOR__ >= 34 +# define ENABLETESTTHREADING +# endif +#endif + +using namespace ov::auto_plugin::tests; + +#ifdef ENABLETESTTHREADING +TEST_F(AutoFuncTests, can_compile_with_multiple_devices) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); +} + 
+TEST_F(AutoFuncTests, threading_test) { + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + }, + 10, + 10); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_cache_enabled) { + core.set_property(ov::cache_dir(cache_path)); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + }, + 10, + 10); + core.set_property(ov::cache_dir("")); +} + +TEST_F(AutoFuncTests, threading_test_get_version) { + ThreadingTest::runParallel([&]() { + auto versions = core.get_versions("AUTO"); + ASSERT_LE(1u, versions.size()); + }); +} + +TEST_F(AutoFuncTests, theading_compiled_with_cpu_help) { + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_hardware_slower) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + register_plugin_mock_gpu_compile_slower(core, "MOCK_GPU_SLOWER", {}); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU_SLOWER", "MOCK_CPU")}); + }, + 10, + 10); +} + +TEST_F(AutoFuncTests, threading_test_cpu_help_slower) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + register_plugin_mock_cpu_compile_slower(core, "MOCK_CPU_SLOWER", {}); + ThreadingTest::runParallel( + [&]() { + (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU_SLOWER")}); + }, + 10, + 10); +} +#endif \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp new file mode 100644 index 00000000000000..1ba14b66d57207 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.cpp @@ -0,0 +1,790 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +#include +#include +#include + +#include "common_test_utils/file_utils.hpp" +#include "ie_plugin_config.hpp" +#include "openvino/core/any.hpp" +#include "openvino/core/except.hpp" +#include "openvino/opsets/opset11.hpp" +#include "openvino/pass/serialize.hpp" +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" +#include "openvino/runtime/internal_properties.hpp" +#include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/iremote_context.hpp" +#include "openvino/runtime/iremote_tensor.hpp" +#include "openvino/runtime/make_tensor.hpp" +#include "openvino/runtime/properties.hpp" +#include "openvino/util/file_util.hpp" +#include "openvino/util/shared_object.hpp" + +namespace { + +std::string get_mock_engine_path() { + std::string mockEngineName("mock_engine"); + return 
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + mockEngineName + IE_BUILD_POSTFIX); +} + +template +std::function make_std_function(const std::shared_ptr so, const std::string& functionName) { + std::function ptr(reinterpret_cast(ov::util::get_symbol(so, functionName.c_str()))); + return ptr; +} + +bool support_model(const std::shared_ptr& model, const ov::SupportedOpsMap& supported_ops) { + for (const auto& op : model->get_ops()) { + if (supported_ops.find(op->get_friendly_name()) == supported_ops.end()) + return false; + } + return true; +} + +ov::PropertyName RO_property(const std::string& propertyName) { + return ov::PropertyName(propertyName, ov::PropertyMutability::RO); +} + +ov::PropertyName RW_property(const std::string& propertyName) { + return ov::PropertyName(propertyName, ov::PropertyMutability::RW); +} + +} // namespace + +void ov::auto_plugin::tests::AutoFuncTests::SetUp() { + if (m_mock_plugins.empty()) { + register_plugin_mock_cpu(core, "MOCK_CPU", {}); + register_plugin_mock_gpu(core, "MOCK_GPU", {}); + } + model_can_batch = create_model_with_batch_possible(); + model_cannot_batch = create_model_with_reshape(); + auto hash = std::hash()(::testing::UnitTest::GetInstance()->current_test_info()->name()); + std::stringstream ss; + ss << std::this_thread::get_id(); + cache_path = + "threading_test" + std::to_string(hash) + "_" + ss.str() + "_" + ov::test::utils::GetTimestamp() + "_cache"; +} + +void ov::auto_plugin::tests::AutoFuncTests::TearDown() { + ov::test::utils::removeFilesWithExt(cache_path, "blob"); + ov::test::utils::removeDir(cache_path); +} + +ov::Tensor ov::auto_plugin::tests::AutoFuncTests::create_and_fill_tensor(const ov::element::Type& type, + const ov::Shape& shape) { + switch (type) { + case ov::element::Type_t::i64: + return create_tensor::value_type>(type, shape); + default: + break; + } + OPENVINO_THROW("Cannot generate tensor. 
Unsupported element type."); +} + +std::shared_ptr ov::auto_plugin::tests::AutoFuncTests::create_model_with_batch_possible() { + auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 2, 2}); + param->set_friendly_name("input"); + auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1}); + const_value->set_friendly_name("const_val"); + auto add = std::make_shared(param, const_value); + add->set_friendly_name("add"); + auto result = std::make_shared(add); + result->set_friendly_name("res"); + return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); +} + +std::shared_ptr ov::auto_plugin::tests::AutoFuncTests::create_model_with_reshape() { + auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 2, 2}); + param->set_friendly_name("input"); + auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1}); + const_value->set_friendly_name("const_val"); + auto add = std::make_shared(param, const_value); + add->set_friendly_name("add"); + auto reshape_val = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1}, {-1}); + reshape_val->set_friendly_name("reshape_val"); + auto reshape = std::make_shared(add, reshape_val, true); + reshape->set_friendly_name("reshape"); + auto result = std::make_shared(reshape); + result->set_friendly_name("res"); + return std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); +} + +// Mock plugins + +class MockCompiledModel : public ov::ICompiledModel { +public: + MockCompiledModel(const std::shared_ptr& model, + const std::shared_ptr& plugin, + const ov::AnyMap& config) + : ov::ICompiledModel(model, plugin), + m_config(config), + m_model(model), + m_has_context(false) { + try { + m_context = plugin->get_default_context(config); + } catch (ov::Exception&) { + } + } + + MockCompiledModel(const std::shared_ptr& model, + const std::shared_ptr& plugin, + const ov::AnyMap& config, + const ov::SoPtr& context) + : ov::ICompiledModel(model, plugin), + m_config(config), + m_model(model), + m_has_context(true), + m_context(context) {} + + // Methods from a base class ov::ICompiledModel + void export_model(std::ostream& model) const override { + ov::pass::StreamSerialize(model, std::function()) + .run_on_model(std::const_pointer_cast(m_model)); + } + + std::shared_ptr get_runtime_model() const override { + return m_model; + } + + void set_property(const ov::AnyMap& properties) override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::Any get_property(const std::string& name) const override { + auto prop = m_config.find(name); + if (prop != m_config.end()) + return prop->second; + if (name == ov::supported_properties) { + std::vector supportedProperties{ov::optimal_number_of_infer_requests, + ov::hint::performance_mode}; + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::optimal_number_of_infer_requests.name()) { + return decltype(ov::optimal_number_of_infer_requests)::value_type(2); + } else if (name == ov::model_name) { + return decltype(ov::model_name)::value_type(m_model->get_name()); + } else if (name == ov::execution_devices) { + return decltype(ov::execution_devices)::value_type({get_plugin()->get_device_name()}); + } + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr create_sync_infer_request() const override; + + const std::shared_ptr& get_model() const { + return m_model; + } + + ov::SoPtr get_context() const { + return m_context; + } + + bool has_context() const { + return m_has_context; + 
} + +private: + ov::AnyMap m_config; + std::shared_ptr m_model; + bool m_has_context; + ov::SoPtr m_context; +}; + +class MockInferRequest : public ov::ISyncInferRequest { +public: + MockInferRequest(const std::shared_ptr& compiled_model) + : ov::ISyncInferRequest(compiled_model) { + OPENVINO_ASSERT(compiled_model); + m_model = compiled_model->get_model(); + m_has_context = compiled_model->get_context() != nullptr; + // Allocate input/output tensors + for (const auto& input : get_inputs()) { + allocate_tensor(input, [this, input, compiled_model](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + input.get_element_type(), + input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape(), + compiled_model->has_context(), + compiled_model->get_context()); + }); + } + for (const auto& output : get_outputs()) { + allocate_tensor(output, [this, output, compiled_model](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + output.get_element_type(), + output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape(), + compiled_model->has_context(), + compiled_model->get_context()); + }); + } + } + ~MockInferRequest() = default; + + void infer() override { + ov::TensorVector input_tensors; + bool evaludate_flag = true; + for (const auto& input : get_inputs()) { + auto tensor = get_tensor(input); + // check if valid if remote tensor + if (std::dynamic_pointer_cast(tensor._ptr) && m_has_context) { + evaludate_flag = false; + auto remote_tensor = std::dynamic_pointer_cast(tensor._ptr); + if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name()) + OPENVINO_THROW("cannot consume the buffer!"); + } + input_tensors.emplace_back(ov::make_tensor(tensor)); + } + ov::TensorVector output_tensors; + for (const auto& output : get_outputs()) { + auto tensor = get_tensor(output); + // check if valid if remote tensor + if (std::dynamic_pointer_cast(tensor._ptr) && m_has_context) { + evaludate_flag = false; + auto remote_tensor = std::dynamic_pointer_cast(tensor._ptr); + if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name()) + OPENVINO_THROW("cannot consume the buffer!"); + } + output_tensors.emplace_back(ov::make_tensor(tensor)); + } + if (evaludate_flag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // add delay for test + m_model->evaluate(output_tensors, input_tensors); + } + } + std::vector> query_state() const override { + OPENVINO_NOT_IMPLEMENTED; + } + std::vector get_profiling_info() const override { + OPENVINO_NOT_IMPLEMENTED; + } + +private: + void allocate_tensor_impl(ov::SoPtr& tensor, + const ov::element::Type& element_type, + const ov::Shape& shape, + bool has_context, + ov::SoPtr context) { + if (!tensor || tensor->get_element_type() != element_type) { + if (has_context) { + tensor = context->create_tensor(element_type, shape, {}); + } else { + tensor = ov::SoPtr(ov::make_tensor(element_type, shape), nullptr); + } + } else { + tensor->set_shape(shape); + } + } + std::shared_ptr m_model; + bool m_has_context; +}; + +std::shared_ptr MockCompiledModel::create_sync_infer_request() const { + return std::make_shared(std::dynamic_pointer_cast(shared_from_this())); +} + +class MockRemoteTensor : public ov::IRemoteTensor { + ov::AnyMap m_properties; + std::string m_dev_name; + ov::element::Type m_element_type; + ov::Shape m_shape; + +public: + 
MockRemoteTensor(const std::string& name, + const ov::AnyMap& props, + const ov::element::Type& type, + const ov::Shape& shape) + : m_properties(props), + m_dev_name(name), + m_element_type(type), + m_shape(shape) {} + + const ov::AnyMap& get_properties() const override { + return m_properties; + } + const std::string& get_device_name() const override { + return m_dev_name; + } + void set_shape(ov::Shape shape) override { + OPENVINO_NOT_IMPLEMENTED; + } + + const ov::element::Type& get_element_type() const override { + return m_element_type; + } + + const ov::Shape& get_shape() const override { + return m_shape; + } + + const ov::Strides& get_strides() const override { + OPENVINO_NOT_IMPLEMENTED; + } +}; + +class MockRemoteContext : public ov::IRemoteContext { + ov::AnyMap m_property = {{"IS_DEFAULT", true}}; + std::string m_dev_name; + +public: + MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} + const std::string& get_device_name() const override { + return m_dev_name; + } + + const ov::AnyMap& get_property() const override { + return m_property; + } + + ov::SoPtr create_tensor(const ov::element::Type& type, + const ov::Shape& shape, + const ov::AnyMap& params = {}) override { + auto remote_tensor = std::make_shared(m_dev_name, m_property, type, shape); + return {remote_tensor, nullptr}; + } +}; + +class MockCustomRemoteContext : public ov::IRemoteContext { + ov::AnyMap m_property = {{"IS_DEFAULT", false}}; + std::string m_dev_name; + +public: + MockCustomRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} + const std::string& get_device_name() const override { + return m_dev_name; + } + + const ov::AnyMap& get_property() const override { + return m_property; + } + + ov::SoPtr create_tensor(const ov::element::Type& type, + const ov::Shape& shape, + const ov::AnyMap& params = {}) override { + auto remote_tensor = std::make_shared(m_dev_name, m_property, type, shape); + return {remote_tensor, nullptr}; + } +}; + +class MockPluginBase : public ov::IPlugin { +public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + + return std::make_shared(model, shared_from_this(), properties); + } + + std::shared_ptr compile_model(const std::string& model_path, + const ov::AnyMap& properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + + return std::make_shared(model, shared_from_this(), properties, context); + } + + void set_property(const ov::AnyMap& properties) override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::SoPtr create_context(const ov::AnyMap& remote_properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + ov::SoPtr get_default_context(const ov::AnyMap& remote_properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } + + std::shared_ptr import_model(std::istream& model, const ov::AnyMap& properties) const override { + std::string xmlString, xmlInOutString; + ov::Tensor weights; + + ov::pass::StreamSerialize::DataHeader hdr = {}; + model.read(reinterpret_cast(&hdr), sizeof hdr); + + // read 
CNNNetwork input/output precisions + model.seekg(hdr.custom_data_offset); + xmlInOutString.resize(hdr.custom_data_size); + model.read(const_cast(xmlInOutString.c_str()), hdr.custom_data_size); + + // read blob content + model.seekg(hdr.consts_offset); + if (hdr.consts_size) { + weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size}); + char* data = static_cast(weights.data()); + model.read(data, hdr.consts_size); + } + + // read XML content + model.seekg(hdr.model_offset); + xmlString.resize(hdr.model_size); + model.read(const_cast(xmlString.c_str()), hdr.model_size); + + ov::Core core; + auto ov_model = core.read_model(xmlString, weights); + return compile_model(ov_model, properties); + } + + std::shared_ptr import_model(std::istream& model, + const ov::SoPtr& context, + const ov::AnyMap& properties) const override { + std::string xmlString, xmlInOutString; + ov::Tensor weights; + + ov::pass::StreamSerialize::DataHeader hdr = {}; + model.read(reinterpret_cast(&hdr), sizeof hdr); + + // read CNNNetwork input/output precisions + model.seekg(hdr.custom_data_offset); + xmlInOutString.resize(hdr.custom_data_size); + model.read(const_cast(xmlInOutString.c_str()), hdr.custom_data_size); + + // read blob content + model.seekg(hdr.consts_offset); + if (hdr.consts_size) { + weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size}); + char* data = static_cast(weights.data()); + model.read(data, hdr.consts_size); + } + + // read XML content + model.seekg(hdr.model_offset); + xmlString.resize(hdr.model_size); + model.read(const_cast(xmlString.c_str()), hdr.model_size); + + ov::Core core; + auto ov_model = core.read_model(xmlString, weights); + return compile_model(ov_model, properties, context); + } + + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_NOT_IMPLEMENTED; + } +}; + +class MockPluginSupportBatchAndContext : public MockPluginBase { +public: + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + + std::unordered_set supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"}; + + ov::SupportedOpsMap res; + for (const auto& op : model->get_ordered_ops()) { + if (supported_ops.find(op->get_type_info().name) == supported_ops.end()) + continue; + res.emplace(op->get_friendly_name(), get_device_name()); + } + return res; + } + + ov::SoPtr create_context(const ov::AnyMap& remote_properties) const override { + if (remote_properties.find("CUSTOM_CTX") == remote_properties.end()) + return std::make_shared(get_device_name()); + return std::make_shared(get_device_name()); + } + + ov::SoPtr get_default_context(const ov::AnyMap& remote_properties) const override { + std::string device_name = get_device_name(); + if (remote_properties.find(ov::device::id.name()) != remote_properties.end()) + device_name = device_name + "." 
+ remote_properties.at(ov::device::id.name()).as(); + + return std::make_shared(device_name); + } + + void set_property(const ov::AnyMap& properties) override { + for (const auto& it : properties) { + if (it.first == ov::num_streams.name()) + num_streams = it.second.as(); + else if (it.first == ov::enable_profiling.name()) + m_profiling = it.second.as(); + else if (it.first == ov::hint::performance_mode.name()) + m_perf_hint = it.second.as(); + else if (it.first == ov::hint::num_requests.name()) + m_request = it.second.as(); + else if (it.first == ov::device::id.name()) + m_id = it.second.as(); + else if (it.first == ov::cache_dir.name()) + continue; + else + OPENVINO_THROW(get_device_name(), " set config: " + it.first); + } + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + const std::vector roProperties{RO_property(ov::supported_properties.name()), + RO_property(ov::optimal_batch_size.name()), + RO_property(ov::device::capabilities.name()), + RO_property(ov::device::type.name()), + RO_property(ov::device::uuid.name()), + RO_property(ov::device::id.name()), + RO_property(ov::intel_gpu::memory_statistics.name())}; + // the whole config is RW before network is loaded. + const std::vector rwProperties{RW_property(ov::num_streams.name()), + RW_property(ov::enable_profiling.name()), + RW_property(ov::compilation_num_threads.name()), + RW_property(ov::hint::performance_mode.name()), + RW_property(ov::hint::num_requests.name())}; + if (name == ov::supported_properties) { + std::vector supportedProperties; + supportedProperties.reserve(roProperties.size() + rwProperties.size()); + supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); + supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::hint::num_requests.name()) { + return decltype(ov::hint::num_requests)::value_type(1); + } else if (name == ov::hint::performance_mode.name()) { + return decltype(ov::hint::performance_mode)::value_type(ov::hint::PerformanceMode::LATENCY); + } else if (name == ov::optimal_batch_size.name()) { + return decltype(ov::optimal_batch_size)::value_type(4); + } else if (name == ov::device::capabilities.name()) { + return decltype(ov::device::capabilities)::value_type( + {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT}); + } else if (name == ov::device::type.name()) { + return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); + } else if (name == ov::loaded_from_cache.name()) { + return false; + } else if (name == ov::enable_profiling.name()) { + return decltype(ov::enable_profiling)::value_type{false}; + } else if (name == ov::streams::num.name()) { + return decltype(ov::streams::num)::value_type{2}; + } else if (name == ov::compilation_num_threads.name()) { + return decltype(ov::compilation_num_threads)::value_type{4}; + } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : rwProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : roProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == ov::internal::supported_properties) { + return 
decltype(ov::internal::supported_properties)::value_type( + {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); + } else if (ov::internal::caching_properties == name) { + std::vector caching_properties = {ov::device::uuid, ov::device::id}; + return decltype(ov::internal::caching_properties)::value_type(caching_properties); + } else if (name == ov::device::uuid) { + ov::device::UUID uuid = {}; + return decltype(ov::device::uuid)::value_type{uuid}; + } else if (name == ov::device::id) { + return decltype(ov::device::id)::value_type{m_id}; + } else if (name == ov::loaded_from_cache.name()) { + return m_loaded_from_cache; + } else if (name == ov::intel_gpu::memory_statistics) { + return decltype(ov::intel_gpu::memory_statistics)::value_type{{}}; + } + OPENVINO_NOT_IMPLEMENTED; + } + +private: + int32_t num_streams{0}; + bool m_profiling = false; + bool m_loaded_from_cache{false}; + ov::hint::PerformanceMode m_perf_hint = ov::hint::PerformanceMode::THROUGHPUT; + uint32_t m_request = 0; + std::string m_id; +}; + +void ov::auto_plugin::tests::AutoFuncTests::reg_plugin(ov::Core& core, + std::shared_ptr& plugin, + const std::string& device_name, + const ov::AnyMap& properties) { + std::string libraryPath = get_mock_engine_path(); + if (!m_so) + m_so = ov::util::load_shared_object(libraryPath.c_str()); + plugin->set_device_name(device_name); + std::function injectProxyEngine = make_std_function(m_so, "InjectPlugin"); + + injectProxyEngine(plugin.get()); + core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + std::string("mock_engine") + IE_BUILD_POSTFIX), + device_name, + properties); + m_mock_plugins.emplace_back(plugin); +} + +// test +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} + +class MockPlugin : public MockPluginBase { +public: + ov::SupportedOpsMap query_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + + std::unordered_set supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"}; + + ov::SupportedOpsMap res; + for (const auto& op : model->get_ordered_ops()) { + if (supported_ops.find(op->get_type_info().name) == supported_ops.end()) + continue; + res.emplace(op->get_friendly_name(), get_device_name()); + } + return res; + } + + void set_property(const ov::AnyMap& properties) override { + for (const auto& it : properties) { + if (it.first == ov::num_streams.name()) + num_streams = it.second.as(); + else if (it.first == ov::enable_profiling.name()) + m_profiling = it.second.as(); + else if (it.first == ov::device::id.name()) + continue; + else if (it.first == ov::cache_dir.name()) + continue; + else + OPENVINO_THROW(get_device_name(), " set config: " + it.first); + } + } + + ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override { + const std::vector roProperties{RO_property(ov::supported_properties.name()), + RO_property(ov::device::uuid.name()), + RO_property(ov::device::capabilities.name())}; + // the whole config is RW before network is loaded. 
+ const std::vector rwProperties{RW_property(ov::num_streams.name()), + RW_property(ov::enable_profiling.name()), + RW_property(ov::hint::performance_mode.name())}; + if (name == ov::supported_properties) { + std::vector supportedProperties; + supportedProperties.reserve(roProperties.size() + rwProperties.size()); + supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); + supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); + + return decltype(ov::supported_properties)::value_type(supportedProperties); + } else if (name == ov::loaded_from_cache.name()) { + return false; + } else if (name == ov::enable_profiling.name()) { + return decltype(ov::enable_profiling)::value_type{false}; + } else if (name == ov::streams::num.name()) { + return decltype(ov::streams::num)::value_type{2}; + } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : rwProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key + std::vector configs; + for (const auto& property : roProperties) { + configs.emplace_back(property); + } + return configs; + } else if (name == ov::internal::supported_properties) { + return decltype(ov::internal::supported_properties)::value_type( + {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}}); + } else if (name == ov::device::capabilities) { + std::vector capabilities; + capabilities.push_back(ov::device::capability::EXPORT_IMPORT); + return decltype(ov::device::capabilities)::value_type(capabilities); + } else if (ov::internal::caching_properties == name) { + std::vector caching_properties = {ov::device::uuid}; + return decltype(ov::internal::caching_properties)::value_type(caching_properties); + } else if (name == ov::device::uuid) { + ov::device::UUID uuid = {}; + return decltype(ov::device::uuid)::value_type{uuid}; + } else if (name == ov::loaded_from_cache.name()) { + return m_loaded_from_cache; + } + OPENVINO_NOT_IMPLEMENTED; + } + +private: + int32_t num_streams{0}; + bool m_profiling = false; + bool m_loaded_from_cache{false}; +}; + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + std::shared_ptr base_plugin = std::make_shared(); + + reg_plugin(core, base_plugin, device_name, properties); +} + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + class MockPluginCompileSlower : public MockPluginSupportBatchAndContext { + public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties); + } + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), 
properties, context); + } + }; + + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} + +void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties) { + class MockCPUPluginCompileSlower : public MockPlugin { + public: + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties) const override { + OPENVINO_ASSERT(model); + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties); + } + std::shared_ptr compile_model(const std::shared_ptr& model, + const ov::AnyMap& properties, + const ov::SoPtr& context) const override { + if (!support_model(model, query_model(model, properties))) + OPENVINO_THROW("Unsupported model"); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); // add delay for test + return std::make_shared(model, shared_from_this(), properties, context); + } + }; + + std::shared_ptr base_plugin = std::make_shared(); + reg_plugin(core, base_plugin, device_name, properties); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp new file mode 100644 index 00000000000000..711355315b4516 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp @@ -0,0 +1,133 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include +#include + +#include +#include + +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/iplugin.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +#define ASSERT_THROW_WITH_MESSAGE(code, expected_exception, expected_message) \ + do { \ + try { \ + { code; } \ + FAIL() << "no exception occured" << std::endl; \ + } catch (const expected_exception& e) { \ + EXPECT_THAT(e.what(), testing::HasSubstr(expected_message)); \ + } catch (const std::exception& e) { \ + FAIL() << "an unexpected exception occured: " << e.what() << std::endl; \ + } catch (...) 
{ \ + FAIL() << "an unknown exception occured" << std::endl; \ + } \ + } while (0); + +class PluginRemoteTensor : public ov::RemoteTensor { +public: + /** + * @brief Checks that type defined runtime parameters are presented in remote object + * @param tensor a tensor to check + */ + static void type_check(const Tensor& tensor) { + RemoteTensor::type_check(tensor, {{"IS_DEFAULT", {}}}); + } + + bool is_default() { + return get_params().at("IS_DEFAULT").as(); + } +}; + +class PluginRemoteContext : public ov::RemoteContext { +public: + // Needed to make create_tensor overloads from base class visible for user + using RemoteContext::create_host_tensor; + using RemoteContext::create_tensor; + /** + * @brief Checks that type defined runtime parameters are presented in remote object + * @param remote_context A remote context to check + */ + static void type_check(const RemoteContext& remote_context) { + RemoteContext::type_check(remote_context, {{"IS_DEFAULT", {}}}); + } + + bool is_default() { + return get_params().at("IS_DEFAULT").as(); + } +}; + +class AutoFuncTests : public ::testing::Test { +public: + ov::Core core; + + void SetUp() override; + void TearDown() override; + + ov::Tensor create_and_fill_tensor(const ov::element::Type& type, const ov::Shape& shape); + +protected: + void register_plugin_mock_cpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); + void register_plugin_mock_cpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties); + void register_plugin_mock_gpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); + void register_plugin_mock_gpu_compile_slower(ov::Core& core, + const std::string& device_name, + const ov::AnyMap& properties); + std::shared_ptr model_can_batch; + std::shared_ptr model_cannot_batch; + std::string cache_path; + +private: + template + ov::Tensor create_tensor(const ov::element::Type& type, const ov::Shape& shape) { + ov::Tensor tensor(type, shape); + T* data = tensor.data(); + for (size_t i = 0; i < tensor.get_size(); i++) { + data[i] = static_cast(i); + } + return tensor; + } + std::vector> m_mock_plugins; + std::shared_ptr m_so; + + void reg_plugin(ov::Core& core, + std::shared_ptr& plugin, + const std::string& device_name, + const ov::AnyMap& properties); + std::shared_ptr create_model_with_batch_possible(); + std::shared_ptr create_model_with_reshape(); +}; + +class ThreadingTest { +public: + static void runParallel(std::function func, + const unsigned int iterations = 100, + const unsigned int threadsNum = 8) { + std::vector threads(threadsNum); + + for (auto& thread : threads) { + thread = std::thread([&]() { + for (unsigned int i = 0; i < iterations; ++i) { + func(); + } + }); + } + + for (auto& thread : threads) { + if (thread.joinable()) + thread.join(); + } + } +}; +} // namespace tests +} // namespace auto_plugin +} // namespace ov \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/caching_test.cpp b/src/plugins/auto/tests/functional/behavior/caching_test.cpp new file mode 100644 index 00000000000000..ab34fe7b1a83f3 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/caching_test.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/file_utils.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, compiled_with_cache_enabled) { + 
core.set_property(ov::cache_dir(cache_path)); + core.set_property("MOCK_GPU", ov::device::id("test")); // device id for cache property distinguish with MOCK_CPU + auto compiled_model = + core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2); + compiled_model = core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // can reuse the cache, no extra cache generated + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2); + core.set_property("MOCK_GPU", ov::device::id("test_regenerate")); + compiled_model = core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // new cache file expected + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + core.set_property(ov::cache_dir("")); +} + +TEST_F(AutoFuncTests, compiled_with_cache_enabled_batch_enabled) { +#ifdef ENABLE_AUTO_BATCH + core.set_property(ov::cache_dir(cache_path)); + core.set_property("MOCK_GPU", ov::device::id("test")); // device id for cache property distinguish with MOCK_CPU + auto compiled_model = + core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // can reuse the cache, no extra cache generated + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3); + core.set_property("MOCK_GPU", ov::device::id("test_regenerate")); + compiled_model = core.compile_model(model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + // new cache file expected + ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 5); + core.set_property(ov::cache_dir("")); +#endif +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/callback_test.cpp b/src/plugins/auto/tests/functional/behavior/callback_test.cpp new file mode 100644 index 00000000000000..5219677b99a5cb --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/callback_test.cpp @@ -0,0 +1,116 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_infer_with_cpu_help) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + auto req = compiled_model.create_infer_request(); + bool is_called = false; + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE + ASSERT_EQ(exception_ptr, nullptr); + is_called = true; + })); + 
ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_TRUE(is_called); +} + +TEST_F(AutoFuncTests, impl_does_not_copy_callback) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + { + auto somePtr = std::make_shared(42); + ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) { + ASSERT_EQ(nullptr, exception_ptr); + ASSERT_EQ(1, somePtr.use_count()); + })); + } + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, return_result_not_ready) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + std::promise callbackTimeStamp; + auto callbackTimeStampFuture = callbackTimeStamp.get_future(); + // add a callback to the request and capture the timestamp + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + if (exception_ptr) { + callbackTimeStamp.set_exception(exception_ptr); + } else { + callbackTimeStamp.set_value(std::chrono::system_clock::now()); + } + })); + ASSERT_NO_THROW(req.start_async()); + bool ready = false; + ASSERT_NO_THROW(ready = req.wait_for({})); + // get timestamp taken AFTER return from the wait(STATUS_ONLY) + const auto afterWaitTimeStamp = std::chrono::system_clock::now(); + if (afterWaitTimeStamp < callbackTimeStampFuture.get()) { + ASSERT_FALSE(ready); + } + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, rethrow_if_callback_throw) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) { + OPENVINO_THROW("Throw"); + })); + ASSERT_NO_THROW(req.start_async()); + ASSERT_THROW(req.wait(), ov::Exception); +} + +TEST_F(AutoFuncTests, can_start_several_async_inside_completion_callback_with_safedtor) { + const int NUM_ITER = 10; + struct TestUserData { + std::atomic numIter = {0}; + std::promise promise; + }; + TestUserData data; + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); + ov::InferRequest req; + ASSERT_NO_THROW(req = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) { + if (exception_ptr) { + data.promise.set_exception(exception_ptr); + } else { + if (data.numIter.fetch_add(1) != NUM_ITER) { + req.start_async(); + } else { + data.promise.set_value(true); + } + } + })); + auto future = data.promise.get_future(); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + future.wait(); + auto callbackStatus = future.get(); + ASSERT_TRUE(callbackStatus); + auto dataNumIter = data.numIter - 1; + ASSERT_EQ(NUM_ITER, dataNumIter); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp new file mode 100644 index 00000000000000..2d71b8ee7d16b0 --- /dev/null +++ 
b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "infer_consistency_test.hpp" + +using namespace ov::auto_plugin::tests; +namespace { +auto props = []() { + return std::vector{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")}, + {ov::device::priorities("MOCK_GPU")}, + {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}}; +}; + +const std::vector get_vs_set{true, false}; + +const std::vector target_device{"AUTO", "MULTI"}; + +INSTANTIATE_TEST_SUITE_P(AutoFuncTests, + Consistency_Test, + ::testing::Combine(::testing::ValuesIn(target_device), + ::testing::ValuesIn(get_vs_set), + ::testing::ValuesIn(props())), + Consistency_Test::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp new file mode 100644 index 00000000000000..6243a45b16e366 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/infer_consistency_test.hpp @@ -0,0 +1,105 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include +#include +#include + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_common.hpp" +#include "ov_models/subgraph_builders.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +using consistency_test_param = std::tuple; // property + +class Consistency_Test : public AutoFuncTests, public testing::WithParamInterface { + void SetUp() override { + AutoFuncTests::SetUp(); + std::tie(target_device, use_get_tensor, property) = this->GetParam(); + }; + +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + ov::AnyMap property; + bool use_get_tensor; + std::string target_device; + std::tie(target_device, use_get_tensor, property) = obj.param; + std::ostringstream result; + result << "target_device=" << target_device << "_"; + result << std::string(use_get_tensor ? 
"_get_blob" : "_set_blob") << "_"; + if (!property.empty()) { + for (auto& iter : property) { + result << "priority=" << iter.first << "_" << iter.second.as(); + } + } + return result.str(); + } + +protected: + bool use_get_tensor; + ov::AnyMap property; + std::string target_device; + + void run() { + std::vector irs; + std::vector> ref; + std::map, ov::Tensor> input_data; + + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + auto inputs = compiled_model.inputs(); + auto outputs = compiled_model.outputs(); + auto num_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests); + for (size_t j = 0; j < num_requests; j++) { + auto inf_req = compiled_model.create_infer_request(); + irs.push_back(inf_req); + for (auto& iter : inputs) { + auto tensor = ov::test::utils::create_and_fill_tensor(iter.get_element_type(), iter.get_shape()); + if (use_get_tensor) + memcpy(reinterpret_cast(inf_req.get_tensor(iter).data()), + reinterpret_cast(tensor.data()), + tensor.get_byte_size()); + else + inf_req.set_tensor(iter, tensor); + auto node_ptr = iter.get_node_shared_ptr(); + input_data.insert({std::const_pointer_cast(node_ptr), tensor}); + } + for (auto& iter : outputs) { + if (!use_get_tensor) { + auto tensor = ov::Tensor(iter.get_element_type(), iter.get_shape()); + inf_req.set_tensor(iter, tensor); + } + } + auto refOutData = ngraph::helpers::interpretFunction(model_cannot_batch, input_data); + ref.push_back(refOutData); + } + for (size_t i = 0; i < 50; i++) { + for (auto ir : irs) { + ir.start_async(); + } + + for (auto ir : irs) { + ir.wait(); + } + } + for (size_t i = 0; i < irs.size(); ++i) { + for (auto& iter : outputs) { + ov::test::utils::compare(irs[i].get_tensor(iter), ref[i][0]); + } + } + } +}; + +TEST_P(Consistency_Test, infer_consistency_test) { + run(); +} + +} // namespace tests +} // namespace auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp new file mode 100644 index 00000000000000..4065c5abed7b20 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/infer_multi_threading_tests.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_run_3syncrequests_consistently_from_threads) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + auto f1 = std::async(std::launch::async, [&] { + req1.infer(); + }); + auto f2 = std::async(std::launch::async, [&] { + req2.infer(); + }); + auto f3 = std::async(std::launch::async, [&] { + req3.infer(); + }); + + f1.wait(); + f2.wait(); + f3.wait(); + + ASSERT_NO_THROW(f1.get()); + ASSERT_NO_THROW(f2.get()); + ASSERT_NO_THROW(f3.get()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_from_threads_without_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + 
{ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req1.infer()); + ASSERT_NO_THROW(req2.infer()); + ASSERT_NO_THROW(req3.infer()); + + auto f1 = std::async(std::launch::async, [&] { + req1.start_async(); + }); + auto f2 = std::async(std::launch::async, [&] { + req2.start_async(); + }); + auto f3 = std::async(std::launch::async, [&] { + req3.start_async(); + }); + + f1.wait(); + f2.wait(); + f3.wait(); + + ASSERT_NO_THROW(f1.get()); + ASSERT_NO_THROW(f2.get()); + ASSERT_NO_THROW(f3.get()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_with_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + req1.start_async(); + ASSERT_NO_THROW(req1.wait()); + + req2.start_async(); + ASSERT_NO_THROW(req2.wait()); + + req3.start_async(); + ASSERT_NO_THROW(req3.wait()); +} + +TEST_F(AutoFuncTests, can_run_3asyncrequests_parallel_with_wait) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + ov::InferRequest req1, req2, req3; + ASSERT_NO_THROW(req1 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req2 = compiled_model.create_infer_request()); + ASSERT_NO_THROW(req3 = compiled_model.create_infer_request()); + req1.start_async(); + req2.start_async(); + req3.start_async(); + + ASSERT_NO_THROW(req2.wait()); + ASSERT_NO_THROW(req1.wait()); + ASSERT_NO_THROW(req3.wait()); +} diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.cpp b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp new file mode 100644 index 00000000000000..770fe04444a708 --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.cpp @@ -0,0 +1,172 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "io_tensor.hpp" + +#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" + +using namespace ov::auto_plugin::tests; + +void InferRequest_IOTensor_Test::SetUp() { + AutoFuncTests::SetUp(); + std::tie(target_device, property) = this->GetParam(); + auto compiled_model = + core.compile_model(model_cannot_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + input = compiled_model.input(); + output = compiled_model.output(); +} + +void InferRequest_IOTensor_Test::TearDown() { + input = {}; + output = {}; + AutoFuncTests::TearDown(); +} + +TEST_P(InferRequest_IOTensor_Test, fail_to_set_nullptr_for_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ASSERT_THROW(req.set_tensor(input, {}), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, 
fail_to_set_nullptr_for_output) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ASSERT_THROW(req.set_tensor(output, {}), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, can_set_and_get_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_NO_THROW(req.set_tensor(input, tensor)); + ov::Tensor actual_tensor; + ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + + ASSERT_TRUE(actual_tensor); + ASSERT_NE(nullptr, actual_tensor.data()); + ASSERT_EQ(tensor.data(), actual_tensor.data()); + ASSERT_EQ(input.get_element_type(), actual_tensor.get_element_type()); + ASSERT_EQ(input.get_shape(), actual_tensor.get_shape()); +} + +TEST_P(InferRequest_IOTensor_Test, fail_to_set_tensor_with_incorrect_name) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, fail_input_set_size_incorrect) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto shape = input.get_shape(); + shape[0] *= 2; + auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, fail_output_set_size_incorrect) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto shape = output.get_shape(); + shape[0] *= 2; + auto tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), shape); + ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_input) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_output) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_input_after_async) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(tensor1 = req.get_tensor(input)); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor2 = req.get_tensor(input)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, second_call_get_output_after_async) { + auto compiled_model = 
core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor tensor1, tensor2; + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(tensor1 = req.get_tensor(output)); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor2 = req.get_tensor(output)); + ASSERT_EQ(tensor1.data(), tensor2.data()); +} + +TEST_P(InferRequest_IOTensor_Test, can_infer_with_set_tensor) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + auto input_tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + ASSERT_NO_THROW(req.set_tensor(input, input_tensor)); + auto output_tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); + ASSERT_NO_THROW(req.set_tensor(output, output_tensor)); + ASSERT_NO_THROW(req.infer()); + + auto actual_input_tensor = req.get_tensor(input); + ASSERT_EQ(actual_input_tensor.data(), input_tensor.data()); + auto actual_output_tensor = req.get_tensor(output); + ASSERT_EQ(actual_output_tensor.data(), output_tensor.data()); +} + +TEST_P(InferRequest_IOTensor_Test, can_infer_after_io_realloc) { + auto compiled_model = core.compile_model(model_cannot_batch, target_device, property); + req = compiled_model.create_infer_request(); + ov::Tensor input_tensor, output_tensor; + auto in_shape = input.get_shape(); + auto out_shape = output.get_shape(); + + // imitates blob reallocation + ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); + ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5})); + ASSERT_NO_THROW(input_tensor.set_shape(in_shape)); + + ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); + ASSERT_NO_THROW(output_tensor.set_shape({20, 20, 20, 20})); + ASSERT_NO_THROW(output_tensor.set_shape(out_shape)); + + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(req.get_tensor(output)); +} +namespace { +auto props = []() { + return std::vector{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")}, + {ov::device::priorities("MOCK_GPU")}, + {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}}; +}; + +INSTANTIATE_TEST_SUITE_P(AutoFuncTests, + InferRequest_IOTensor_Test, + ::testing::Combine(::testing::Values("AUTO"), ::testing::ValuesIn(props())), + InferRequest_IOTensor_Test::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(AutoFuncTestsCumu, + InferRequest_IOTensor_Test, + ::testing::Combine(::testing::Values("MULTI"), ::testing::ValuesIn(props())), + InferRequest_IOTensor_Test::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp new file mode 100644 index 00000000000000..c4e000395f3eac --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include +#include + +#include +#include + +#include "auto_func_test.hpp" +#include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/iplugin.hpp" + +namespace ov { +namespace auto_plugin { +namespace tests { + +using test_params = std::tuple; + +class InferRequest_IOTensor_Test : public AutoFuncTests, public ::testing::WithParamInterface { +public: + static std::string 
getTestCaseName(testing::TestParamInfo obj) { + std::string target_device; + ov::AnyMap configuration; + std::tie(target_device, configuration) = obj.param; + std::ostringstream result; + result << "target_device=" << target_device << "_"; + if (!configuration.empty()) { + for (auto& iter : configuration) { + result << "priority=" << iter.first << "_" << iter.second.as(); + } + } + return result.str(); + } + + void SetUp() override; + void TearDown() override; + +protected: + std::string target_device; + ov::InferRequest req; + ov::Output input; + ov::Output output; + ov::AnyMap property; +}; +} // namespace tests +} // namespace auto_plugin +} // namespace ov \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp b/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp new file mode 100644 index 00000000000000..ba358be578943a --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/life_time_batch_enabled_test.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" +#include "common_test_utils/include/common_test_utils/file_utils.hpp" +#include "openvino/runtime/make_tensor.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_model) { + auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_model_latency_hint) { + auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU")}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} + +TEST_F(AutoFuncTests, tensor_life_time_with_batch_not_applicable_model) { + auto gpu_compiled_model = core.compile_model(model_cannot_batch, "MOCK_GPU"); + auto gpu_request = gpu_compiled_model.create_infer_request(); + auto input = gpu_compiled_model.input(); + auto gpu_tensor = gpu_request.get_tensor(input); + auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor); + + auto compiled_model = core.compile_model( + model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}); + auto request = compiled_model.create_infer_request(); + auto tensor = request.get_tensor(input); + auto tensor_detail = ov::get_tensor_impl(gpu_tensor); + ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so); +} \ No newline at 
end of file diff --git a/src/plugins/auto/tests/functional/behavior/property_test.cpp b/src/plugins/auto/tests/functional/behavior/property_test.cpp new file mode 100644 index 00000000000000..cfba5f0308b01b --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/property_test.cpp @@ -0,0 +1,99 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, default_perfmode_for_multi) { + auto compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); +} + +TEST_F(AutoFuncTests, respect_secondary_property_for_multi) { + auto compiled_model = core.compile_model( + model_cannot_batch, + "MULTI", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::device::properties("MOCK_GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} + +TEST_F(AutoFuncTests, default_perfmode_for_auto_ctput) { + auto compiled_model = + core.compile_model(model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), + ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} + +TEST_F(AutoFuncTests, default_perfmode_for_auto) { + auto compiled_model = + core.compile_model(model_cannot_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } + } + } + } +} + +TEST_F(AutoFuncTests, respect_secondary_property_auto_ctput) { + auto compiled_model = core.compile_model( + model_cannot_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::device::properties("MOCK_GPU", 
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}); + EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), + ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); + auto prop = compiled_model.get_property(ov::device::properties.name()).as(); + for (auto& item : prop) { + for (auto& item2 : item.second.as()) { + if (item2.first == ov::hint::performance_mode) { + if (item.first == "MOCK_CPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::LATENCY); + } else if (item.first == "MOCK_GPU") { + EXPECT_EQ(item2.second, ov::hint::PerformanceMode::THROUGHPUT); + } + } + } + } +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp new file mode 100644 index 00000000000000..ef8446ee6f6efe --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/remote_tensor_test.cpp @@ -0,0 +1,104 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "auto_func_test.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity) { + ov::CompiledModel compiled_model; + compiled_model = core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU")}); + auto input = model_cannot_batch->get_parameters().at(0); + auto output = model_cannot_batch->get_results().at(0); + auto fake_img_data = ov::Tensor(input->get_element_type(), input->get_shape()); + auto inf_req_regular = compiled_model.create_infer_request(); + inf_req_regular.set_tensor(input, fake_img_data); + // infer using system memory + ASSERT_NO_THROW(inf_req_regular.infer()); + auto output_tensor_regular = inf_req_regular.get_tensor(output); + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + // no actual inference for remote tensor, due to data not able to mmap + infer_req_remote.wait(); +} + +TEST_F(AutoFuncTests, cannot_infer_remote_if_not_initialized_for_device) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + // simulate 2 hardware devices + register_plugin_mock_gpu(core, "MOCK_3", {}); + ov::CompiledModel compiled_model; + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto input = model_cannot_batch->get_parameters().at(0); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_NO_THROW(compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_3")})); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_THROW(infer_req_remote.wait(), ov::Exception); +} + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices) { + core.compile_model(model_cannot_batch, "MOCK_CPU"); + core.compile_model(model_cannot_batch, "MOCK_GPU"); // need to initialize the order of plugins in mock_engine + 
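+    // Register one more GPU-flavoured mock device ("MOCK_3") so that MULTI has two contexts on which remote tensors can be created.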
register_plugin_mock_gpu(core, "MOCK_3", {}); + ov::CompiledModel compiled_model; + auto input = model_cannot_batch->get_parameters().at(0); + ASSERT_NO_THROW( + compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU", "MOCK_3")})); + std::vector inf_req_shared = {}; + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU"); + auto cldnn_context_2 = core.get_default_context("MOCK_3"); + auto remote_tensor_2 = cldnn_context_2.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor_2.get_device_name(), "MOCK_3"); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + auto infer_req_remote_2 = compiled_model.create_infer_request(); + infer_req_remote_2.set_tensor(input, remote_tensor_2); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_NO_THROW(infer_req_remote_2.start_async()); + ASSERT_NO_THROW(infer_req_remote.wait()); + ASSERT_NO_THROW(infer_req_remote_2.wait()); +} + +TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices_device_id) { + ov::CompiledModel compiled_model; + auto input = model_cannot_batch->get_parameters().at(0); + ASSERT_NO_THROW( + compiled_model = + core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU.1", "MOCK_CPU")})); + auto cldnn_context = core.get_default_context("MOCK_GPU"); + auto remote_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape()); + ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU"); + auto infer_req_remote = compiled_model.create_infer_request(); + infer_req_remote.set_tensor(input, remote_tensor); + // infer using remote tensor + ASSERT_NO_THROW(infer_req_remote.start_async()); + ASSERT_THROW_WITH_MESSAGE(infer_req_remote.wait(), + ov::Exception, + "None of the devices supports a remote tensor created on the device named MOCK_GPU"); +} + +TEST_F(AutoFuncTests, can_throw_if_oversubsciption_of_inferrequest) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_cannot_batch, + "MULTI", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), ov::intel_auto::device_bind_buffer(true)})); + auto optimal_num = compiled_model.get_property(ov::optimal_number_of_infer_requests); + for (size_t i = 0; i < optimal_num; i++) { + compiled_model.create_infer_request(); + } + ASSERT_THROW(compiled_model.create_infer_request(), ov::Exception); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/behavior/wait_test.cpp b/src/plugins/auto/tests/functional/behavior/wait_test.cpp new file mode 100644 index 00000000000000..33f8ee50b56c8b --- /dev/null +++ b/src/plugins/auto/tests/functional/behavior/wait_test.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "auto_func_test.hpp" +#include "openvino/runtime/exception.hpp" + +using namespace ov::auto_plugin::tests; + +TEST_F(AutoFuncTests, can_infer_and_wait_for_result) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = 
compiled_model.create_infer_request(); + ov::Tensor tensor; + auto input = compiled_model.input(); + auto output = compiled_model.output(); + ASSERT_NO_THROW(tensor = req.get_tensor(input)); + ASSERT_NO_THROW(req.infer()); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(tensor = req.get_tensor(output)); +} + +TEST_F(AutoFuncTests, can_wait_without_startasync) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + ASSERT_NO_THROW(req.wait()); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.wait_for(std::chrono::milliseconds{1})); +} + +TEST_F(AutoFuncTests, can_throw_if_request_busy) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + auto input = compiled_model.input(); + auto output = compiled_model.output(); + auto output_tensor = req.get_tensor(input); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(try { req.set_tensor(input, output_tensor); } catch (const ov::Busy&){}); + ASSERT_NO_THROW(req.wait_for({})); + ASSERT_NO_THROW(req.wait()); +} + +TEST_F(AutoFuncTests, can_throw_on_get_tensor_if_request_busy) { + ov::CompiledModel compiled_model; + ASSERT_NO_THROW(compiled_model = core.compile_model( + model_can_batch, + "AUTO", + {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); + auto req = compiled_model.create_infer_request(); + auto input = compiled_model.input(); + ASSERT_NO_THROW(req.start_async()); + ASSERT_NO_THROW(try { req.get_tensor(input); } catch (const ov::Busy&){}); + ASSERT_NO_THROW(req.wait()); +} \ No newline at end of file diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp new file mode 100644 index 00000000000000..845f63ee4cee8c --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/executable_network/exec_network_base.hpp" + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> auto_configs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + ExecutableNetworkBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_configs)), + ExecutableNetworkBaseTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ExecutableNetworkBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + ExecutableNetworkBaseTest::getTestCaseName); + +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + 
InferenceEngine::Precision::U8, + InferenceEngine::Precision::I16, + InferenceEngine::Precision::U16}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + ExecNetSetPrecision, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_configs)), + ExecNetSetPrecision::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ExecNetSetPrecision, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + ExecNetSetPrecision::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp new file mode 100644 index 00000000000000..ca702dc66db4bc --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/executable_network/get_metric.hpp" + +using namespace BehaviorTestsDefinitions; + +using namespace InferenceEngine::PluginConfigParams; + +namespace { + +// +// Executable Network GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest, + IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp new file mode 100644 index 00000000000000..c02a5c44c30e35 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/callback.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> multiConfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestCallbackTests::getTestCaseName); +} // namespace diff --git 
a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp new file mode 100644 index 00000000000000..483067a521c1b0 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/io_blob.hpp" + +#include + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestIOBBlobTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestIOBBlobTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestIOBBlobTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestIOBBlobTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp similarity index 53% rename from src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp rename to src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index 60e9d3542d41c3..5cbda535d8b2df 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "behavior/infer_request/memory_states.hpp" + +#include + #include "functional_test_utils/plugin_cache.hpp" #include "ov_models/builders.hpp" @@ -11,17 +13,17 @@ using namespace BehaviorTestsDefinitions; namespace { std::vector memoryStateTestCases = { -#ifdef ENABLE_INTEL_CPU + memoryStateParams(InferRequestVariableStateTest::getNetwork(), + {"c_1-3", "r_1-3"}, + ov::test::utils::DEVICE_AUTO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}), memoryStateParams(InferRequestVariableStateTest::getNetwork(), {"c_1-3", "r_1-3"}, ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}) -#endif -}; + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}})}; -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestQueryStateExceptionTest, +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestVariableStateTest, ::testing::ValuesIn(memoryStateTestCases), - InferRequestQueryStateExceptionTest::getTestCaseName); + InferRequestVariableStateTest::getTestCaseName); } // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp new file mode 100644 index 00000000000000..27a82693f28ff6 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp @@ 
-0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request/multithreading.hpp" +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Multiconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + InferRequestMultithreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Multiconfigs)), + InferRequestMultithreadingTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp new file mode 100644 index 00000000000000..baa0c4fe978c29 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/perf_counters.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestPerfCountersTest::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp new file mode 100644 index 00000000000000..c1037519a72f8e --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/set_blob_by_type.hpp" + +#include "common_test_utils/test_constants.hpp" + +using namespace BehaviorTestsDefinitions; +using namespace InferenceEngine; + +const std::vector BlobTypes = { + FuncTestUtils::BlobType::Compound, + FuncTestUtils::BlobType::Batched, + FuncTestUtils::BlobType::Memory, +}; + +const std::map autoConfig{ + {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, + InferRequestSetBlobByType, + ::testing::Combine(::testing::ValuesIn(BlobTypes), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::Values(autoConfig)), + InferRequestSetBlobByType::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, + InferRequestSetBlobByType, + ::testing::Combine(::testing::ValuesIn(BlobTypes), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::Values(autoConfig)), + 
InferRequestSetBlobByType::getTestCaseName); diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp new file mode 100644 index 00000000000000..e1307f5092f6a5 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/infer_request/wait.hpp" + +#include + +#include "ie_plugin_config.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector> Autoconfigs = { + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + InferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + InferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp new file mode 100644 index 00000000000000..20cb407b3ee694 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/properties.hpp" +#include "openvino/runtime/core.hpp" + +using namespace ov::test::behavior; + +using namespace InferenceEngine::PluginConfigParams; + +namespace { +// +// Executable Network GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); + +// +// Executable Network GetConfig / SetConfig +// + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetIncorrectPropertyTest, + OVClassCompiledModelGetIncorrectPropertyTest, + ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE")); +////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp new file mode 100644 index 00000000000000..60d1c7b6a90e3b --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/compiled_model_base.hpp" +#include "ie_plugin_config.hpp" + +using namespace ov::test::behavior; +namespace { +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVCompiledModelBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTest::getTestCaseName); + 
+INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledModelBaseTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVCompiledModelBaseTestOptional, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTestOptional::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledModelBaseTestOptional, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVCompiledModelBaseTestOptional::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp new file mode 100644 index 00000000000000..f264a55c667a9f --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include "behavior/compiled_model/import_export.hpp" +#include "ie_plugin_config.hpp" + +using namespace ov::test::behavior; +namespace { +const std::vector netPrecisions = { + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, + ov::element::f16, + ov::element::f32, +}; + +const std::vector auto_configs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVCompiledGraphImportExportTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_configs)), + OVCompiledGraphImportExportTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp new file mode 100644 index 00000000000000..981e8d66aa48b3 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -0,0 +1,144 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/compiled_model/properties.hpp" + +#include "ie_system_conf.h" +#include "openvino/runtime/properties.hpp" + +using namespace ov::test::behavior; + +namespace { + +const std::vector inproperties = { + {ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + OVClassCompiledModelPropertiesIncorrectTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, "AUTO:TEMPLATE"), + ::testing::ValuesIn(inproperties)), + OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); + +#if (defined(__APPLE__) || defined(_WIN32)) +auto default_affinity = [] { + auto numaNodes = InferenceEngine::getAvailableNUMANodes(); + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return ov::Affinity::HYBRID_AWARE; + } else if (numaNodes.size() > 1) { + return ov::Affinity::NUMA; + } else { + return ov::Affinity::NONE; + } +}(); 
+#else +auto default_affinity = [] { + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return ov::Affinity::HYBRID_AWARE; + } else { + return ov::Affinity::CORE; + } +}(); +#endif + +const std::vector multi_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::num_streams(ov::streams::AUTO)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, + InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVClassCompiledModelPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multi_properties)), + OVClassCompiledModelPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, + OVCompiledModelIncorrectDevice, + ::testing::Values("TEMPLATE")); + +const std::vector auto_multi_device_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties(ov::AnyMap{{"TEMPLATE", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow, + OVClassCompiledModelPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_device_properties)), + OVClassCompiledModelPropertiesTests::getTestCaseName); + +const std::vector configsWithSecondaryProperties = { + {ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}}; + +const std::vector autoConfigsWithSecondaryProperties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("AUTO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::device::properties("AUTO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), + ov::device::properties("TEMPLATE", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}}; + +// IE Class Load network +INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"), + ::testing::ValuesIn(configsWithSecondaryProperties))); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("MULTI"), + ::testing::ValuesIn(autoConfigsWithSecondaryProperties))); + +INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest, + OVClassCompileModelWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("AUTO"), + 
::testing::ValuesIn(autoConfigsWithSecondaryProperties))); + +const std::vector> automultiExeDeviceConfigs = { + std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}, "TEMPLATE")}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, + OVCompileModelGetExecutionDeviceTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(automultiExeDeviceConfigs)), + OVCompileModelGetExecutionDeviceTests::getTestCaseName); + +const std::vector multiDevicePriorityConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(multiDevicePriorityConfigs)), + OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName); + +const std::vector multiModelPriorityConfigs = {{ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::hint::model_priority(ov::hint::Priority::LOW)}, + {ov::hint::model_priority(ov::hint::Priority::DEFAULT)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, + OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, + ::testing::Combine(::testing::Values("AUTO:TEMPLATE"), + ::testing::ValuesIn(multiModelPriorityConfigs))); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp new file mode 100644 index 00000000000000..7f274bedbfc2d2 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/callback.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestCallbackTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + OVInferRequestCallbackTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp new file mode 100644 index 00000000000000..aa4f9957601bed --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/infer_request_dynamic.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector AutoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +std::shared_ptr getFunction2() { + const std::vector inputShape = {1, 4, 20, 20}; + const 
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; + + ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; + params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"input_tensor"}); + auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); + + auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); + auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); + auto relu1 = std::make_shared(add); + + auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); + auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); + auto relu2 = std::make_shared(mult); + + auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); + concat->get_output_tensor(0).set_names({"concat"}); + + return std::make_shared(concat, params, "SplitAddConcat"); +} + +INSTANTIATE_TEST_SUITE_P( + smoke_Auto_BehaviorTests, + OVInferRequestDynamicTests, + ::testing::Combine(::testing::Values(getFunction2()), + ::testing::Values(std::vector, std::vector>>{ + {{1, 4, 20, 20}, {1, 2, 20, 40}}, + {{2, 4, 20, 20}, {2, 2, 20, 40}}}), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferRequestDynamicTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp new file mode 100644 index 00000000000000..6ae0a4eab6b20f --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/inference_chaining.hpp" + +#include "common_test_utils/test_constants.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector AutoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferenceChaining, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferenceChaining::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferenceChainingStatic, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + OVInferenceChainingStatic::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp new file mode 100644 index 00000000000000..e57698f7487f92 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -0,0 +1,74 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/io_tensor.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +const std::vector emptyConfigs = {{}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestIOTensorTest, + 
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestIOTensorTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorTest::getTestCaseName); + +std::vector prcs = { + ov::element::boolean, + ov::element::bf16, + ov::element::f16, + ov::element::f32, + ov::element::f64, + ov::element::i4, + ov::element::i8, + ov::element::i16, + ov::element::i32, + ov::element::i64, + ov::element::u1, + ov::element::u4, + ov::element::u8, + ov::element::u16, + ov::element::u32, + ov::element::u64, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestIOTensorSetPrecisionTest, + ::testing::Combine(::testing::ValuesIn(prcs), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestIOTensorSetPrecisionTest, + ::testing::Combine(::testing::ValuesIn(prcs), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestCheckTensorPrecision, + ::testing::Combine(::testing::ValuesIn(prcs), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestCheckTensorPrecision::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestCheckTensorPrecision, + ::testing::Combine(::testing::ValuesIn(prcs), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestCheckTensorPrecision::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp new file mode 100644 index 00000000000000..6bbf4fe9cf7d30 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/multithreading.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector Multiconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestMultithreadingTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Multiconfigs)), + OVInferRequestMultithreadingTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp new file mode 100644 index 00000000000000..bcb27758486db3 --- /dev/null +++ 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/perf_counters.hpp" + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::enable_profiling(true)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer(true)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestPerfCountersTest, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestPerfCountersTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp new file mode 100644 index 00000000000000..0bf4c2fbaa496a --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_infer_request/wait.hpp" + +#include + +using namespace ov::test::behavior; + +namespace { +const std::vector Autoconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + OVInferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + OVInferRequestWaitTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(Autoconfigs)), + OVInferRequestWaitTests::getTestCaseName); + +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp new file mode 100644 index 00000000000000..998280199f00d1 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/caching_tests.hpp" + +#include +#include +#include + +using namespace ov::test::behavior; +using namespace ngraph; + +namespace { +static const std::vector precisionsTemplate = { + ov::element::f32, +}; + +static const std::vector batchSizesTemplate = {1, 2}; + +const std::vector autoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelCacheTestBase, + ::testing::Combine(::testing::ValuesIn(CompileModelCacheTestBase::getStandardFunctions()), + ::testing::ValuesIn(precisionsTemplate), + ::testing::ValuesIn(batchSizesTemplate), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoConfigs)), + 
CompileModelCacheTestBase::getTestCaseName); + +const std::vector LoadFromFileConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; +const std::vector TestTargets = { + ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelLoadFromFileTestBase, + ::testing::Combine(::testing::ValuesIn(TestTargets), ::testing::ValuesIn(LoadFromFileConfigs)), + CompileModelLoadFromFileTestBase::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase, + CompileModelLoadFromMemoryTestBase, + ::testing::Combine(::testing::ValuesIn(TestTargets), ::testing::ValuesIn(LoadFromFileConfigs)), + CompileModelLoadFromMemoryTestBase::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp new file mode 100644 index 00000000000000..c5afda521a5ca1 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/core_integration.hpp" + +#include + +#include "behavior/ov_plugin/core_integration_sw.hpp" +#include "behavior/ov_plugin/query_model.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/runtime/core.hpp" + +using namespace ov::test::behavior; +using namespace InferenceEngine::PluginConfigParams; + +// defined in plugin_name.cpp +extern const char* cpu_plugin_file_name; + +namespace { +// +// IE Class Common tests with +// + +const std::vector configsWithEmpty = {{}}; +const std::vector configsWithMetaPlugin = {{ov::device::priorities("AUTO")}, + {ov::device::priorities("MULTI")}, + {ov::device::priorities("AUTO", "MULTI")}, + {ov::device::priorities("AUTO", "TEMPLATE")}, + {ov::device::priorities("MULTI", "TEMPLATE")}}; + +INSTANTIATE_TEST_SUITE_P( + smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyWithEmptyConfigTest, + OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, + ::testing::Combine(::testing::Values("MULTI:AUTO", "AUTO:MULTI", "MULTI:AUTO,TEMPLATE", "AUTO:TEMPLATE,MULTI"), + ::testing::ValuesIn(configsWithEmpty)), + ::testing::PrintToStringParamName()); + +INSTANTIATE_TEST_SUITE_P(smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyTest, + OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(configsWithMetaPlugin)), + ::testing::PrintToStringParamName()); + +// Several devices case +/* enable below in nightly tests*/ +/* +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestCompileModel, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); + +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestQueryModel, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); + +INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest, + OVClassSeveralDevicesTestDefaultCore, + ::testing::Values(std::vector({"GPU.0", "GPU.1"}))); +*/ +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp new file mode 100644 index 00000000000000..dc88e4b57f9a01 --- /dev/null 
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/life_time.hpp" + +using namespace ov::test::behavior; +namespace { +INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, + OVHoldersTest, + ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"), + OVHoldersTest::getTestCaseName); + +const std::vector device_names_and_priorities = { + "MULTI:TEMPLATE", // GPU via MULTI, + "AUTO:TEMPLATE", // GPU via AUTO, +}; +INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, + OVHoldersTestWithConfig, + ::testing::ValuesIn(device_names_and_priorities), + OVHoldersTestWithConfig::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp new file mode 100644 index 00000000000000..39756244e9fdad --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -0,0 +1,165 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/properties_tests.hpp" + +#include + +using namespace ov::test::behavior; +using namespace InferenceEngine::PluginConfigParams; + +namespace { +const std::vector multi_Auto_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("YES")}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("NO")}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("YES")}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("NO")}, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests, + OVPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multi_Auto_properties)), + OVPropertiesTests::getTestCaseName); + +const std::vector multi_setcore_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::model_priority(ov::hint::Priority::HIGH)}}; + +const std::vector multi_compileModel_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, + OVSetPropComplieModleGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + 
::testing::ValuesIn(multi_setcore_properties), + ::testing::ValuesIn(multi_compileModel_properties)), + OVSetPropComplieModleGetPropTests::getTestCaseName); + +const std::vector auto_setcore_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::model_priority(ov::hint::Priority::HIGH)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::HIGH)}}; + +const std::vector auto_compileModel_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests, + OVSetPropComplieModleGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_setcore_properties), + ::testing::ValuesIn(auto_compileModel_properties)), + OVSetPropComplieModleGetPropTests::getTestCaseName); + +const std::vector default_properties = {{ov::enable_profiling(false)}, + {ov::log::level("LOG_NONE")}, + {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, + {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, + {ov::intel_auto::device_bind_buffer(false)}, + {ov::intel_auto::enable_startup_fallback(true)}, + {ov::device::priorities("")}}; +INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, + OVPropertiesDefaultTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(default_properties)), + OVPropertiesDefaultTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, + OVPropertiesDefaultSupportedTests, + ::testing::Values(ov::test::utils::DEVICE_TEMPLATE, ov::test::utils::DEVICE_AUTO)); + +const std::vector auto_multi_incorrect_device_properties = { + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::num_streams(4), + ov::device::properties("TEMPLATE", ov::num_streams(4))}, + {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), + ov::num_streams(4), + ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow, + OVSetUnsupportPropCompileModelWithoutConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, + ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_incorrect_device_properties)), + OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName); + +// +// IE Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_AutoOVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P( + smoke_AutoOVCheckGetSupportedROMetricsPropsTests, + OVCheckGetSupportedROMetricsPropsTests, + 
::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::ValuesIn(OVCheckGetSupportedROMetricsPropsTests::configureProperties( + {ov::device::full_name.name()}))), + OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + OVCheckSetSupportedRWMandatoryMetricsPropsTests, + OVCheckSetSupportedRWMetricsPropsTests, + ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( + {ov::hint::model_priority.name(), ov::log::level.name()}))), + OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); + +const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}; + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest, + OVClassSetDevicePriorityConfigPropsTest, + ::testing::Combine(::testing::Values("MULTI", "AUTO"), ::testing::ValuesIn(multiConfigs))); + +const std::vector auto_properties = {{ov::device::priorities("TEMPLATE")}, + {ov::device::priorities("TEMPLATE(1)")}}; + +INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, + OVPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(auto_properties)), + OVPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests, + OVPropertiesTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_properties)), + OVPropertiesTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests, + OVCheckSetSupportedRWMetricsPropsTests, + ::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"), + ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( + {ov::hint::model_priority.name(), ov::log::level.name()}))), + OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp new file mode 100644 index 00000000000000..bad8c61b42cf27 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -0,0 +1,191 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/configuration_tests.hpp" + +#include "ie_plugin_config.hpp" +#include "ie_system_conf.h" + +using namespace BehaviorTestsDefinitions; + +namespace { +#if (defined(__APPLE__) || defined(_WIN32)) +auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { + auto numaNodes = InferenceEngine::getAvailableNUMANodes(); + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return std::string{CONFIG_VALUE(HYBRID_AWARE)}; + } else if (numaNodes.size() > 1) { + return std::string{CONFIG_VALUE(NUMA)}; + } else { + return std::string{CONFIG_VALUE(NO)}; + } +}()}; +#else +auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { + auto coreTypes = InferenceEngine::getAvailableCoresTypes(); + if (coreTypes.size() > 1) { + return std::string{CONFIG_VALUE(HYBRID_AWARE)}; + } else { + return std::string{CONFIG_VALUE(YES)}; + } +}()}; +#endif + +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16}; + +const std::vector> conf = {{}}; + +const std::vector> 
MultiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}}; + +const std::vector> AutoConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_WARNING}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + 
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + CorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(MultiConfigs)), + CorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + CorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(AutoConfigs)), + CorrectConfigTests::getTestCaseName); + +const std::vector> multiinconfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, +}; + +const std::vector> autoinconfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + 
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}}; + +const std::vector> multiconf = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + IncorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), + IncorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + IncorrectConfigTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoinconfigs)), + IncorrectConfigTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + IncorrectConfigAPITests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiinconfigs)), + IncorrectConfigAPITests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + IncorrectConfigAPITests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(autoinconfigs)), + IncorrectConfigAPITests::getTestCaseName); + +const std::vector> auto_multi_prop_config = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; + +const std::vector> auto_multi_loadNetWork_config = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}, + {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, + {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, + InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + SetPropLoadNetWorkGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(auto_multi_prop_config), + ::testing::ValuesIn(auto_multi_loadNetWork_config)), + SetPropLoadNetWorkGetPropTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + SetPropLoadNetWorkGetPropTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), + 
::testing::ValuesIn(auto_multi_prop_config), + ::testing::ValuesIn(auto_multi_loadNetWork_config)), + SetPropLoadNetWorkGetPropTests::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp new file mode 100644 index 00000000000000..29097f845f876d --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/core_integration.hpp" + +using namespace BehaviorTestsDefinitions; + +using namespace InferenceEngine::PluginConfigParams; + +// defined in plugin_name.cpp +extern const char* cpu_plugin_file_name; + +namespace { +// +// IE Class Common tests with +// +// +// IE Class GetMetric +// + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_FULL_DEVICE_NAME, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest, + IEClassGetMetricTest_ThrowUnsupported, + ::testing::Values("MULTI", "AUTO")); + +INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetConfigTest, + IEClassGetConfigTest_ThrowUnsupported, + ::testing::Values("MULTI", "AUTO")); +////////////////////////////////////////////////////////////////////////////////////////// +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp new file mode 100644 index 00000000000000..12553dbab98b03 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#ifdef __GLIBC__ +# include +# if __GLIBC_MINOR__ >= 34 +# define ENABLETESTMULTI +# endif +#else +# define ENABLETESTMULTI +#endif + +namespace { + +const Params params[] = { + std::tuple{ov::test::utils::DEVICE_TEMPLATE, {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}}, +#ifdef ENABLETESTMULTI + std::tuple{ov::test::utils::DEVICE_MULTI, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, + std::tuple{ov::test::utils::DEVICE_AUTO, + {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}}, +#endif +}; +} // namespace +/* +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTests, testing::ValuesIn(params), +CoreThreadingTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTestsWithIterations, + testing::Combine(testing::ValuesIn(params), + testing::Values(4), + testing::Values(50), + testing::Values(ModelClass::Default)), + CoreThreadingTestsWithIterations::getTestCaseName); +*/ diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp new file mode 100644 index 00000000000000..b75c4a4a93c51e --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/set_preprocess.hpp" + +#ifdef ENABLE_GAPI_PREPROCESSING + +using namespace BehaviorTestsDefinitions; +namespace { +const std::vector netPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16}; + +const std::vector> multiConfigs = { + {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}}; + +const std::vector ioPrecisions = {InferenceEngine::Precision::FP32, + InferenceEngine::Precision::U8}; +const std::vector netLayouts = { + InferenceEngine::Layout::NCHW, + // InferenceEngine::Layout::NHWC +}; + +const std::vector ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC}; + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPreprocessConversionTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(netLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessConversionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + InferRequestPreprocessDynamicallyInSetBlobTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Bool(), + ::testing::Bool(), + ::testing::ValuesIn(netLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(true), // only SetBlob + ::testing::Values(true), // only SetBlob + ::testing::Values(ov::test::utils::DEVICE_MULTI), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPreprocessConversionTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(ioPrecisions), + ::testing::ValuesIn(netLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::ValuesIn(ioLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessConversionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + InferRequestPreprocessDynamicallyInSetBlobTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Bool(), + ::testing::Bool(), + ::testing::ValuesIn(netLayouts), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(true), // only SetBlob + ::testing::Values(true), // only SetBlob + ::testing::Values(ov::test::utils::DEVICE_AUTO), + ::testing::ValuesIn(multiConfigs)), + InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); + +} // namespace + +#endif // ENABLE_GAPI_PREPROCESSING diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp new file mode 100644 index 00000000000000..796149e7cb1e76 --- /dev/null +++ 
b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/plugin/version.hpp" + +using namespace BehaviorTestsDefinitions; +namespace { +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, + VersionTest, + ::testing::Values(ov::test::utils::DEVICE_MULTI), + VersionTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + VersionTest, + ::testing::Values(ov::test::utils::DEVICE_AUTO), + VersionTest::getTestCaseName); +} // namespace diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp new file mode 100644 index 00000000000000..2c54a0d17b2f8d --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "functional_test_utils/core_config.hpp" + +#include "shared_test_classes/base/ov_subgraph.hpp" + +void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {} + +namespace ov { +namespace test { + +void core_configuration(ov::test::SubgraphBaseTest* test) {} + +} // namespace test +} // namespace ov diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp new file mode 100644 index 00000000000000..564307d2daad46 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/set_device_name.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "set_device_name.hpp" + +#include +#include + +namespace ov { +namespace test { +void set_device_suffix(const std::string& suffix) { + if (!suffix.empty()) { + throw std::runtime_error("The suffix can't be used for CPU device!"); + } +} +} // namespace test +} // namespace ov diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp new file mode 100644 index 00000000000000..bf32bfb031b4b2 --- /dev/null +++ b/src/plugins/auto/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "functional_test_utils/skip_tests_config.hpp" + +#include + +#include +#include + +#include "ie_parallel.hpp" + +std::vector disabledTestPatterns() { + std::vector retVector{ + // TODO: Issue: 43793 + R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)", + R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)", + + // Not expected behavior + R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", + R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", + + // Not implemented yet: + R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)", + R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)", + R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)", + R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)", + + // 
TODO: CVS-104942 + R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", + R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", + + // CPU does not support dynamic rank + // Issue: CVS-66778 + R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", + R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", + R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", + // unsupported metrics + R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + + // Issue: + // New API tensor tests + R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)", + R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)", + R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)", + + // AUTO does not support import / export + R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", + + // New plugin API doesn't support changes of pre-processing + R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", + R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", + // New plugin work with tensors, so it means that blob in old API can have different pointers + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)", + R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)", + // TODO Issue 100145 + R"(.*Behavior.*InferRequestIOBBlobTest.*canReallocateExternalBlobViaGet.*)", + R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", + R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)", + // Not expected behavior + R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)", + R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)", + // template plugin doesn't support this case + R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"}; + +#if !defined(OPENVINO_ARCH_X86_64) + // very time-consuming test + retVector.emplace_back(R"(.*OVInferConsistencyTest.*)"); +#endif + +#if defined(_WIN32) + retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); +#endif + return retVector; +} diff --git a/src/plugins/auto/tests/unit/auto_unit_test.cpp b/src/plugins/auto/tests/unit/auto_unit_test.cpp index 64de772b599bdd..139533bc378bba 100644 --- a/src/plugins/auto/tests/unit/auto_unit_test.cpp +++ b/src/plugins/auto/tests/unit/auto_unit_test.cpp @@ -3,11 +3,22 @@ // #include "include/auto_unit_test.hpp" + #include "common_test_utils/file_utils.hpp" #include "openvino/core/any.hpp" +#include "openvino/opsets/opset11.hpp" +#include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/util/file_util.hpp" -#include "openvino/util/shared_object.hpp" + +namespace testing { +namespace internal { +template <> +void PrintTo(const ov::Any& a, std::ostream* os) { + *os << "using custom PrintTo ov::Any"; +} +} // namespace internal +} // namespace testing std::shared_ptr ov::mock_auto_plugin::tests::BaseTest::create_model() { auto param = std::make_shared(ov::element::i64, ov::Shape{1, 3, 
2, 2}); @@ -30,52 +41,56 @@ ov::mock_auto_plugin::tests::BaseTest::BaseTest() { NiceMock* mock_auto = new NiceMock(); plugin.reset(mock_auto); // construct mock plugin - mock_plugin_cpu = std::make_shared>(); - mock_plugin_gpu = std::make_shared>(); + mock_plugin_cpu = std::make_shared>(); + mock_plugin_gpu = std::make_shared>(); // prepare mockExeNetwork - mockIExeNet = std::make_shared>(model, mock_plugin_cpu); + mockIExeNet = std::make_shared>(model, mock_plugin_cpu); mockExeNetwork = {mockIExeNet, {}}; - mockIExeNetActual = std::make_shared>(model, mock_plugin_gpu); + mockIExeNetActual = std::make_shared>(model, mock_plugin_gpu); mockExeNetworkActual = {mockIExeNetActual, {}}; - inferReqInternal = std::make_shared(mockIExeNet); + ON_CALL(*mockIExeNet.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNet.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + ON_CALL(*mockIExeNetActual.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetActual.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + inferReqInternal = std::make_shared(mockIExeNet); + ON_CALL(*mockIExeNet.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternal)); optimalNum = (uint32_t)1; ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); - inferReqInternalActual = std::make_shared(mockIExeNetActual); + inferReqInternalActual = std::make_shared(mockIExeNetActual); + ON_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternalActual)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault([this]() { - return mockIExeNet->ICompiledModel::create_infer_request(); - }); + return mockIExeNet->ICompiledModel::create_infer_request(); + }); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault([this]() { - return mockIExeNetActual->ICompiledModel::create_infer_request(); - }); + return mockIExeNetActual->ICompiledModel::create_infer_request(); + }); std::vector supported_props = {ov::hint::num_requests}; ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::supported_properties.name()))) .WillByDefault(Return(ov::Any(supported_props))); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::supported_properties.name()))) .WillByDefault(Return(ov::Any(supported_props))); unsigned int num = 1; - ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name()))) - .WillByDefault(Return(ov::Any(num))); + ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name()))).WillByDefault(Return(ov::Any(num))); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::hint::num_requests.name()))) .WillByDefault(Return(ov::Any(num))); ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) { return plugin->Plugin::get_device_list(config); }); ON_CALL(*plugin, parse_meta_devices) - .WillByDefault( - [this](const std::string& priorityDevices, const ov::AnyMap& config) { + .WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) { return plugin->Plugin::parse_meta_devices(priorityDevices, config); }); ON_CALL(*plugin, select_device) .WillByDefault([this](const std::vector& metaDevices, - const std::string& netPrecision, - unsigned int priority) { + const std::string& 
netPrecision, + unsigned int priority) { return plugin->Plugin::select_device(metaDevices, netPrecision, priority); }); @@ -115,47 +130,46 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() { ON_CALL(*core, get_property(_, StrEq(ov::supported_properties.name()), _)) .WillByDefault(RETURN_MOCK_VALUE(supportedProps)); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(12)); - std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; - std::vector othersCability = {"FP32", "FP16"}; + std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"}; + std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}; + std::vector othersCability = {"FP32", "FP16"}; std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0"; std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1"; auto iGpuType = ov::device::Type::INTEGRATED; auto dGpuType = ov::device::Type::DISCRETE; - ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(cpuCability)); - ON_CALL(*core, get_property(HasSubstr("GPU"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability)); - ON_CALL(*core, get_property(StrEq("OTHERS"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(othersCability)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType)); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dGpuType)); - const std::vector metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS), ov::device::full_name.name(), ov::device::id.name()}; + ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(cpuCability)); + ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); + ON_CALL(*core, get_property(StrEq("OTHERS"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(othersCability)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(iGpuType)); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(iGpuType)); + ON_CALL(*core, 
get_property(StrEq("GPU.1"), StrEq(ov::device::type.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dGpuType)); + const std::vector metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS), + ov::device::full_name.name(), + ov::device::id.name()}; const char igpuFullDeviceName[] = "Intel(R) Gen9 HD Graphics (iGPU)"; const char dgpuFullDeviceName[] = "Intel(R) Iris(R) Xe MAX Graphics (dGPU)"; - ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)) - .WillByDefault(RETURN_MOCK_VALUE(metrics)); - ON_CALL(*core, get_property(_, ov::supported_properties.name(), _)) - .WillByDefault(Return(ov::Any(supportedProps))); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); - ON_CALL(*core, get_property(StrEq("GPU"), - StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0"))); - ON_CALL(*core, get_property(StrEq("GPU.0"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName)); - const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1"}; + ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)).WillByDefault(RETURN_MOCK_VALUE(metrics)); + ON_CALL(*core, get_property(_, ov::supported_properties.name(), _)).WillByDefault(Return(ov::Any(supportedProps))); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); + ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0"))); + ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::full_name.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName)); + const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); ON_CALL(*core, get_supported_property).WillByDefault([](const std::string& device, const ov::AnyMap& fullConfigs) { auto item = fullConfigs.find(ov::device::properties.name()); @@ -186,180 +200,35 @@ ov::mock_auto_plugin::tests::AutoTest::~AutoTest() { core.reset(); } -namespace { - -std::string get_mock_engine_path() { - std::string mockEngineName("mock_engine"); - return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - mockEngineName + IE_BUILD_POSTFIX); -} - -template -std::function make_std_function(const std::shared_ptr so, const std::string& functionName) { - std::function ptr(reinterpret_cast(ov::util::get_symbol(so, functionName.c_str()))); - return ptr; -} - -ov::PropertyName RO_property(const std::string& propertyName) { - return ov::PropertyName(propertyName, ov::PropertyMutability::RO); -} - -ov::PropertyName RW_property(const std::string& propertyName) { - return ov::PropertyName(propertyName, ov::PropertyMutability::RW); -} - -} // namespace - -ov::mock_auto_plugin::tests::AutoTestWithRealCore::AutoTestWithRealCore() { - register_plugin_simple(core, "MOCK_CPU", {}); - // validate the mock plugin, to ensure the order as well - core.get_property("MOCK_CPU", ov::supported_properties); - register_plugin_support_batch_and_context(core, "MOCK_GPU", {}); - // validate the mock plugin - core.get_property("MOCK_GPU", 
ov::supported_properties); - ov::Any optimalNum = (uint32_t)1; - ON_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), _)) - .WillByDefault(Return(mockIExeNet)); - ON_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), _)) - .WillByDefault(Return(mockIExeNetActual)); -} - -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::reg_plugin(ov::Core& core, - std::shared_ptr plugin, - const std::string& device_name, - const ov::AnyMap& properties) { - std::string libraryPath = get_mock_engine_path(); - if (!m_so) - m_so = ov::util::load_shared_object(libraryPath.c_str()); - if (device_name.find("MULTI") == std::string::npos && device_name.find("AUTO") == std::string::npos) - plugin->set_device_name(device_name); - std::function inject_mock_plugin = make_std_function(m_so, "InjectPlugin"); - - inject_mock_plugin(plugin.get()); - core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), - device_name, - properties); -} - -// test -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_support_batch_and_context(ov::Core& core, - const std::string& device_name, - const ov::AnyMap& properties) { - auto remote_context = std::make_shared(mock_plugin_gpu->get_device_name()); - m_mock_contexts.push_back(remote_context); - ON_CALL(*mock_plugin_gpu, compile_model(_, _)).WillByDefault(Return(mockIExeNetActual)); - ON_CALL(*mock_plugin_gpu, create_context).WillByDefault(Return(ov::SoPtr(remote_context, nullptr))); - ON_CALL(*mock_plugin_gpu, get_default_context).WillByDefault(Return(ov::SoPtr(remote_context, nullptr))); - ON_CALL(*mock_plugin_gpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any { - const std::vector roProperties{ - RO_property(ov::supported_properties.name()), - RO_property(ov::optimal_batch_size.name()), - RO_property(ov::optimal_number_of_infer_requests.name()), - RO_property(ov::device::capabilities.name()), - RO_property(ov::device::type.name()), - RO_property(ov::device::uuid.name()), - }; - // the whole config is RW before network is loaded. 
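The reworked unit tests lean heavily on the gmock ON_CALL(...).WillByDefault(...) pattern seen in the hunk above, so that every mocked compiled model and plugin answers property queries with sensible defaults unless an individual test overrides them. A minimal standalone sketch of that pattern follows; it is not part of the patch, and MockCompiledModelLike, ICompiledModelLike and setup_defaults are illustrative names only, with a deliberately simplified property set.

// Illustrative sketch only -- not part of the patch. It mirrors the
// ON_CALL/WillByDefault defaults the auto-plugin mocks above install.
#include <gmock/gmock.h>
#include <string>
#include "openvino/core/any.hpp"
#include "openvino/runtime/properties.hpp"

using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::StrEq;

// Simplified stand-in for the mocked compiled-model interface (assumption).
struct ICompiledModelLike {
    virtual ~ICompiledModelLike() = default;
    virtual ov::Any get_property(const std::string& name) const = 0;
};

struct MockCompiledModelLike : ICompiledModelLike {
    MOCK_METHOD(ov::Any, get_property, (const std::string& name), (const, override));
};

void setup_defaults(NiceMock<MockCompiledModelLike>& mock) {
    // Queries for the optimal request count return 1 unless a test overrides it,
    // just like the optimalNum default installed in BaseTest above.
    ON_CALL(mock, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
        .WillByDefault(Return(ov::Any(static_cast<uint32_t>(1))));
    // Any other property name falls back to an empty Any in this sketch.
    ON_CALL(mock, get_property(_)).WillByDefault(Return(ov::Any{}));
}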
- const std::vector rwProperties{ - RW_property(ov::num_streams.name()), - RW_property(ov::enable_profiling.name()), - RW_property(ov::compilation_num_threads.name()), - RW_property(ov::hint::performance_mode.name()), - RW_property(ov::hint::num_requests.name()) - }; - if (name == ov::supported_properties) { - std::vector supportedProperties; - supportedProperties.reserve(roProperties.size() + rwProperties.size()); - supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); - supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); - - return decltype(ov::supported_properties)::value_type(supportedProperties); - } else if (name == ov::optimal_number_of_infer_requests.name()) { - return decltype(ov::optimal_number_of_infer_requests)::value_type(1); - } else if (name == ov::optimal_batch_size.name()) { - return decltype(ov::optimal_batch_size)::value_type(4); - } else if (name == ov::device::capabilities.name()) { - return decltype(ov::device::capabilities)::value_type({"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"}); - } else if (name == ov::device::type.name()) { - return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); - } else if (name == ov::loaded_from_cache.name()) { - return false; - } else if (name == ov::enable_profiling.name()) { - return decltype(ov::enable_profiling)::value_type{false}; - } else if (name == ov::streams::num.name()) { - return decltype(ov::streams::num)::value_type{2}; - } else if (name == ov::compilation_num_threads.name()) { - return decltype(ov::compilation_num_threads)::value_type{4}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == ov::internal::supported_properties) { - return decltype(ov::internal::supported_properties)::value_type({}); - } - OPENVINO_NOT_IMPLEMENTED; - }); - std::shared_ptr base_plugin = mock_plugin_gpu; - reg_plugin(core, base_plugin, device_name, properties); +void ov::mock_auto_plugin::MockISyncInferRequest::allocate_tensor_impl(ov::SoPtr& tensor, + const element::Type& element_type, + const Shape& shape) { + if (!tensor || tensor->get_element_type() != element_type) { + tensor = ov::make_tensor(element_type, shape); + } else { + tensor->set_shape(shape); + } } -void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_simple(ov::Core& core, - const std::string& device_name, - const ov::AnyMap& properties) { - ON_CALL(*mock_plugin_cpu, compile_model(_, _)).WillByDefault(Return(mockIExeNet)); - ON_CALL(*mock_plugin_cpu, create_context).WillByDefault(Throw(ov::Exception{"NotImplemented"})); - ON_CALL(*mock_plugin_cpu, get_default_context).WillByDefault(Throw(ov::Exception{"NotImplemented"})); - ON_CALL(*mock_plugin_cpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any { - const std::vector roProperties{ - RO_property(ov::supported_properties.name()), - RO_property(ov::device::uuid.name()), - }; - // the whole config is RW before network is loaded. 
- const std::vector rwProperties{ - RW_property(ov::num_streams.name()), - RW_property(ov::enable_profiling.name()), - RW_property(ov::hint::performance_mode.name()) - }; - if (name == ov::supported_properties) { - std::vector supportedProperties; - supportedProperties.reserve(roProperties.size() + rwProperties.size()); - supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end()); - supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end()); - - return decltype(ov::supported_properties)::value_type(supportedProperties); - } else if (name == ov::loaded_from_cache.name()) { - return false; - } else if (name == ov::enable_profiling.name()) { - return decltype(ov::enable_profiling)::value_type{false}; - } else if (name == ov::streams::num.name()) { - return decltype(ov::streams::num)::value_type{2}; - } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : rwProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key - std::vector configs; - for (const auto& property : roProperties) { - configs.emplace_back(property); - } - return configs; - } else if (name == ov::internal::supported_properties) { - return decltype(ov::internal::supported_properties)::value_type({}); - } - OPENVINO_NOT_IMPLEMENTED; - }); - std::shared_ptr base_plugin = mock_plugin_cpu; - - reg_plugin(core, base_plugin, device_name, properties); +ov::mock_auto_plugin::MockISyncInferRequest::MockISyncInferRequest( + const std::shared_ptr& compiled_model) + : ov::ISyncInferRequest(compiled_model) { + OPENVINO_ASSERT(compiled_model); + // Allocate input/output tensors + for (const auto& input : get_inputs()) { + allocate_tensor(input, [this, input](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + input.get_element_type(), + input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape()); + }); + } + for (const auto& output : get_outputs()) { + allocate_tensor(output, [this, output](ov::SoPtr& tensor) { + // Can add a check to avoid double work in case of shared tensors + allocate_tensor_impl(tensor, + output.get_element_type(), + output.get_partial_shape().is_dynamic() ? 
ov::Shape{0} : output.get_shape()); + }); + } } \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp index 698c7deb03d990..772dca30497ae4 100644 --- a/src/plugins/auto/tests/unit/compile_model_metric_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_metric_test.cpp @@ -97,11 +97,11 @@ class ExecNetworkget_propertyOptimalNumInferReq : public tests::AutoTest, } }; -using modelPrioPerfHintTestParams = std::tuple; class ExecNetworkget_propertyOtherTest : public tests::AutoTest, @@ -113,11 +113,7 @@ class ExecNetworkget_propertyOtherTest : public tests::AutoTest, std::string actualDeviceName; std::string performanceMode; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceMode, - modelPriority) = obj.param; + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param; std::ostringstream result; if (isNewAPI) { result << "_isNewAPI_" @@ -227,47 +223,60 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1); if (cpuSleep) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - return mockExeNetwork; - })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + return mockExeNetwork; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); } if (actualSleep) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - return mockExeNetworkActual; - })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + return mockExeNetworkActual; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); - EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .Times(AtLeast(1)); + EXPECT_CALL(*mockIExeNet.get(), 
get_property(StrEq(ov::optimal_number_of_infer_requests.name()))).Times(AtLeast(1)); EXPECT_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .Times(AtLeast(1)); + .Times(AtLeast(1)); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)).Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .Times(1); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)).Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(actualDeviceName)), + _)) + .Times(1); if (cpuCustomerNum == -1) { EXPECT_CALL(*mockIExeNet.get(), create_sync_infer_request()).Times(cpuOptimalNum); @@ -281,7 +290,7 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES EXPECT_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).Times(actualCustomerNum); } - auto AutoExecNetwork = plugin->compile_model(model, config); + auto AutoExecNetwork = plugin->compile_model(model, config); auto result = AutoExecNetwork->get_property(ov::optimal_number_of_infer_requests.name()).as(); EXPECT_EQ(result, expectOptimalNum); } @@ -292,57 +301,58 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES // every element for ConfigParams // {is throughput mode, cpuOptimalNum, customer hope for cpu infer requset num, if cpu sleep when load, // actualOptimalNum, customer hope for actual infer requset num, if actual sleep when load, actual device Name -// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of OptimalNum} +// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of +// OptimalNum} // const std::vector testConfigs = { - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false}, - ConfigParams {true, 3, 5, 
false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false}, - ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true}, - ConfigParams {true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false}, - ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true}, - ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true}, - ConfigParams {false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true}, - ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true}, - ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, - ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, - ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, - ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, - ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, - }; + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, 
false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false}, + ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true}, + ConfigParams{true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true}, + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false}, + ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true}, + ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true}, + ConfigParams{false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true}, + ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true}, + ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, + ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true}, + 
ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true}, + ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true}, + ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, + ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true}, +}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetworkget_propertyOptimalNumInferReq, @@ -357,11 +367,7 @@ class ExecNetworkGetMetricOtherTest : public tests::AutoTest, std::string actualDeviceName; std::string performanceMode; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceMode, - modelPriority) = obj.param; + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param; std::ostringstream result; if (isNewAPI) { result << "_isNewAPI_" @@ -392,11 +398,7 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t std::string actualDeviceName; std::string performanceHint; ov::Any modelPriority; - std::tie(isNewAPI, - actualSleep, - actualDeviceName, - performanceHint, - modelPriority) = this->GetParam(); + std::tie(isNewAPI, actualSleep, actualDeviceName, performanceHint, modelPriority) = this->GetParam(); config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + actualDeviceName)); config.insert(ov::hint::performance_mode(performanceHint)); config.insert({ov::hint::model_priority.name(), modelPriority.as()}); @@ -418,14 +420,17 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1); ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(8)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); if (actualSleep) { ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) + ::testing::Matcher(StrEq(actualDeviceName)), + _)) .WillByDefault(InvokeWithoutArgs([this]() { std::this_thread::sleep_for(std::chrono::milliseconds(5000)); return mockExeNetworkActual; @@ -433,14 +438,15 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t } else { ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(actualDeviceName)), _)) + ::testing::Matcher(StrEq(actualDeviceName)), + _)) .WillByDefault(Return(mockExeNetworkActual)); } ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum)); ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); + .WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum)); auto AutoExecNetwork = plugin->compile_model(model, config); auto result = AutoExecNetwork->get_property(ov::hint::performance_mode.name()).as(); @@ -455,61 +461,25 
@@ const std::vector modelPrioPerfHintConfig = { ov::test::utils::DEVICE_GPU, "THROUGHPUT", CONFIG_VALUE(MODEL_PRIORITY_LOW)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_LOW)}, + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_LOW)}, modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", CONFIG_VALUE(MODEL_PRIORITY_MED)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_MED)}, + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_MED)}, modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, CONFIG_VALUE(THROUGHPUT), CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, - modelPrioPerfHintTestParams{false, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "LOW"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "LOW"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "MEDIUM"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "MEDIUM"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "THROUGHPUT", - "HIGH"}, - modelPrioPerfHintTestParams{true, - true, - ov::test::utils::DEVICE_GPU, - "LATENCY", - "HIGH"}}; + modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_HIGH)}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "LOW"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "LOW"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "MEDIUM"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "MEDIUM"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "HIGH"}, + modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "HIGH"}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetworkGetMetricOtherTest, diff --git a/src/plugins/auto/tests/unit/compile_model_property_test.cpp b/src/plugins/auto/tests/unit/compile_model_property_test.cpp index 7dacdae919cc04..278c3dbbfe3363 100644 --- a/src/plugins/auto/tests/unit/compile_model_property_test.cpp +++ b/src/plugins/auto/tests/unit/compile_model_property_test.cpp @@ -24,7 +24,7 @@ using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple, // hardware device name to expect loading network on - ov::AnyMap>; // secondary property setting to device + ov::AnyMap>; // secondary property setting to device static std::vector testConfigs; @@ -52,51 +52,72 @@ class LoadNetworkWithSecondaryConfigsMockTest : public tests::AutoTest, public : static std::vector CreateConfigs() { testConfigs.clear(); testConfigs.push_back( - ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"NUM_STREAMS", "12"}, {"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); 
testConfigs.push_back( - ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back(ConfigParams{"AUTO", {"CPU", "GPU"}, {{"NUM_STREAMS", "15"}, {"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); + ConfigParams{"AUTO:CPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO:CPU,GPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); - testConfigs.push_back(ConfigParams{"AUTO:GPU,CPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"AUTO:GPU", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + testConfigs.push_back( + ConfigParams{"AUTO:GPU,CPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); - testConfigs.push_back(ConfigParams{"MULTI:CPU,GPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"MULTI:CPU", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); - testConfigs.push_back(ConfigParams{"MULTI:GPU,CPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"MULTI:CPU,GPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + testConfigs.push_back( + ConfigParams{"MULTI:GPU", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + testConfigs.push_back( + ConfigParams{"MULTI:GPU,CPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); return testConfigs; } void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrNe(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + 
::testing::Matcher(StrNe(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -123,11 +144,10 @@ TEST_P(LoadNetworkWithSecondaryConfigsMockTest, LoadNetworkWithSecondaryConfigsT ov::util::Read{}(strConfigs, deviceConfigs); } } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher(MapContains(deviceConfigs)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(deviceName), + ::testing::Matcher(MapContains(deviceConfigs)))) .Times(1); } @@ -144,32 +164,40 @@ TEST_P(AutoLoadExeNetworkFailedTest, checkLoadFailMassage) { if (device.find("MULTI") != std::string::npos) plugin->set_device_name("MULTI"); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), - ::testing::Matcher(_))) - .WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"})); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - ::testing::Matcher(_))) - .WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + ::testing::Matcher(_))) + .WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + ::testing::Matcher(_))) + .WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"})); if (device == "AUTO") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); } else if (device == "AUTO:CPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, CPU:Mock CPU Load Failed"); } else if (device == "AUTO:GPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[AUTO] compile model failed, GPU:Mock GPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[AUTO] compile model failed, GPU:Mock GPU Load Failed"); } else if (device == "MULTI") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed"); } else if (device == "MULTI:CPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, CPU:Mock CPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, CPU:Mock CPU Load Failed"); } else if (device == "MULTI:GPU") { - EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception, - "[MULTI] compile model failed, GPU:Mock GPU Load Failed"); + EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), + ov::Exception, + "[MULTI] compile model failed, GPU:Mock 
GPU Load Failed"); } } @@ -184,9 +212,9 @@ const std::vector testConfigsAutoLoadFailed = { ConfigParams{"AUTO:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}, ConfigParams{"MULTI", {"CPU", "GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}, ConfigParams{"MULTI:CPU", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", "CPU"}}}, - ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}} - }; + ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest, AutoLoadExeNetworkFailedTest, - ::testing::ValuesIn(testConfigsAutoLoadFailed), - AutoLoadExeNetworkFailedTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest, + AutoLoadExeNetworkFailedTest, + ::testing::ValuesIn(testConfigsAutoLoadFailed), + AutoLoadExeNetworkFailedTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/ctput_test.cpp b/src/plugins/auto/tests/unit/ctput_test.cpp index 8839acd23e4dcd..4b9cfd987b0133 100644 --- a/src/plugins/auto/tests/unit/ctput_test.cpp +++ b/src/plugins/auto/tests/unit/ctput_test.cpp @@ -8,8 +8,7 @@ using namespace ov::mock_auto_plugin; using Config = std::map; using ConfigParams = std::tuple>; -class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, - public ::testing::TestWithParam { +class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector targetDevices; @@ -29,12 +28,16 @@ class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -51,17 +54,15 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) { // Call single device logic and performance hint is THROUGHPUT EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(targetDevice), - ::testing::Matcher( - ComparePerfHint("THROUGHPUT")))) + ::testing::Matcher(targetDevice), + ::testing::Matcher(ComparePerfHint("THROUGHPUT")))) .Times(1); // if target device only has GPU, no CPU helper to be called if (targetDevice.find("GPU") != std::string::npos) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher( - ComparePerfHint("LATENCY")))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(ComparePerfHint("LATENCY")))) .Times(0); } } else { @@ -71,18 +72,16 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) { targetDev += ((deviceName == targetDevices.back()) ? 
"" : ","); EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher( - ComparePerfHint("THROUGHPUT")))) + ::testing::Matcher(deviceName), + ::testing::Matcher(ComparePerfHint("THROUGHPUT")))) .Times(1); } config.insert(ov::device::priorities(targetDev)); // no CPU helper to be called EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher( - ComparePerfHint("LATENCY")))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(ComparePerfHint("LATENCY")))) .Times(0); } @@ -150,12 +149,16 @@ class AutoCTPUTCallMulti : public tests::AutoTest, public ::testing::TestWithPar void SetUp() override { std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -176,21 +179,21 @@ TEST_P(AutoCTPUTCallMulti, CTPUTDeviceLoadFailedNoExceptionThrowTest) { config.insert(ov::device::priorities(targetDev)); ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(loadFailedDevice)), - ::testing::Matcher(_))) + ::testing::Matcher(StrEq(loadFailedDevice)), + ::testing::Matcher(_))) .WillByDefault(Throw(InferenceEngine::GeneralError{""})); if (loadFailedDevice != ov::test::utils::DEVICE_CPU) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher(_))) + ::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(_))) .Times(1); } if (loadFailedDevice != ov::test::utils::DEVICE_GPU) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_GPU), - ::testing::Matcher(_))) + ::testing::Matcher(ov::test::utils::DEVICE_GPU), + ::testing::Matcher(_))) .Times(1); } ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); diff --git a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp index 731c4e5ab34e88..332d55a10ee880 100644 --- a/src/plugins/auto/tests/unit/default_perf_hint_test.cpp +++ b/src/plugins/auto/tests/unit/default_perf_hint_test.cpp @@ -7,12 +7,11 @@ using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple, // hardware device name to expect loading network on - ov::AnyMap>; // secondary property setting to device + ov::AnyMap>; // secondary property setting to device static std::vector testConfigs; -class AutoDefaultPerfHintTest : public tests::AutoTest, - public ::testing::TestWithParam { +class AutoDefaultPerfHintTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::string deviceName; @@ -37,35 +36,36 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, testConfigs.clear(); testConfigs.push_back( ConfigParams{"AUTO", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", 
"CPU"}}}); // CPU: get default_hint:lantency + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + {"CPU", "GPU"}, + {{"MULTI_DEVICE_PRIORITIES", + "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"MULTI_DEVICE_PRIORITIES", + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, - {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency testConfigs.push_back(ConfigParams{ "AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:no perf_hint - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -91,30 +91,30 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, static std::vector CreatePerfHintAndDefaultPerfHintTestConfigs() { testConfigs.clear(); - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, - {"MULTI_DEVICE_PRIORITIES", - "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput + testConfigs.push_back(ConfigParams{ + "AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", + "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, 
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:tput - testConfigs.push_back(ConfigParams{ - "AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -136,30 +136,29 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, static std::vector CreateSecPropAndDefaultPerfHintTestConfigs() { testConfigs.clear(); - testConfigs.push_back(ConfigParams{ - "AUTO", - {"CPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"CPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency - testConfigs.push_back( - ConfigParams{"AUTO", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, - {"MULTI_DEVICE_PRIORITIES", - "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint + testConfigs.push_back(ConfigParams{ + "AUTO", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint - testConfigs.push_back(ConfigParams{ - "AUTO", - {"GPU"}, - {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + testConfigs.push_back(ConfigParams{"AUTO", + {"GPU"}, + {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, + {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -171,11 +170,11 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: get default_hint:tput - testConfigs.push_back(ConfigParams{ - "MULTI:CPU,GPU", - {"CPU", "GPU"}, - {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, - {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput + testConfigs.push_back( + ConfigParams{"MULTI:CPU,GPU", + {"CPU", "GPU"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, + {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput return testConfigs; } @@ -183,13 +182,17 @@ class AutoDefaultPerfHintTest : public tests::AutoTest, std::vector availableDevs = {"CPU", "GPU"}; ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("CPU")), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + 
compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("CPU")), + _)) + .WillByDefault(Return(mockExeNetwork)); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU")), _)) - .WillByDefault(Return(mockExeNetworkActual)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU")), + _)) + .WillByDefault(Return(mockExeNetworkActual)); } }; @@ -246,21 +249,21 @@ TEST_P(NumStreamsAndDefaultPerfHintMockTest, NumStreamsAndDefaultPerfHintTest) { // do not pass default perf_hint to HW HW_PerfHint = "No PERFORMANCE_HINT"; } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(deviceName), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(deviceName), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest, - NumStreamsAndDefaultPerfHintMockTest, - ::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()), - NumStreamsAndDefaultPerfHintMockTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest, + NumStreamsAndDefaultPerfHintMockTest, + ::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()), + NumStreamsAndDefaultPerfHintMockTest::getTestCaseName); TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) { std::string device; @@ -309,21 +312,21 @@ TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) { if (itor != deviceConfigs.end() && !isCPUHelper) { HW_PerfHint = itor->second.as(); } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest, - PerHintAndDefaultPerfHintMockTest, - ::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()), - PerHintAndDefaultPerfHintMockTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest, + PerHintAndDefaultPerfHintMockTest, + ::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()), + PerHintAndDefaultPerfHintMockTest::getTestCaseName); TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) { std::string device; @@ -372,18 +375,18 @@ TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) { } } } - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + ::testing::Matcher(ComparePerfHint(HW_PerfHint)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest, - SecPropAndDefaultPerfHintMockTest, - 
::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()), - SecPropAndDefaultPerfHintMockTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P( + smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest, + SecPropAndDefaultPerfHintMockTest, + ::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()), + SecPropAndDefaultPerfHintMockTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/dynamic_output_test.cpp b/src/plugins/auto/tests/unit/dynamic_output_test.cpp index 0ff5c35c116d1d..afade1a3d1d4e8 100644 --- a/src/plugins/auto/tests/unit/dynamic_output_test.cpp +++ b/src/plugins/auto/tests/unit/dynamic_output_test.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include +#include + #include "include/auto_unit_test.hpp" using DynamicOutputConfigParams = std::tuple< @@ -11,8 +12,7 @@ using DynamicOutputConfigParams = std::tuple< ov::Any // expected device to run inference on >; -class DynamicOutputInferenceTest : public tests::AutoTest, - public ::testing::TestWithParam { +class DynamicOutputInferenceTest : public tests::AutoTest, public ::testing::TestWithParam { public: std::shared_ptr create_dynamic_output_model(); static std::string getTestCaseName(testing::TestParamInfo obj); @@ -45,27 +45,35 @@ std::shared_ptr DynamicOutputInferenceTest::create_dynamic_output_mod auto scores = std::make_shared(ov::element::f32, ov::Shape{1, 1, 2}); scores->set_friendly_name("param_2"); scores->get_output_tensor(0).set_names({"input_tensor_2"}); - auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10}); + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10}); auto iou_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75}); auto score_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7}); - auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, - iou_threshold, score_threshold); + auto nms = std::make_shared(boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold); auto res = std::make_shared(nms); res->set_friendly_name("output_dynamic"); return std::make_shared(ov::NodeVector{nms}, ov::ParameterVector{boxes, scores}); } void DynamicOutputInferenceTest::SetUp() { - model = create_dynamic_output_model(); - std::tie(priorityList, targetList) = GetParam(); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - (_))).WillByDefault(Return(mockExeNetwork)); + model = create_dynamic_output_model(); + std::tie(priorityList, targetList) = GetParam(); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + (_))) + .WillByDefault(Return(mockExeNetwork)); } TEST_P(DynamicOutputInferenceTest, 
CanSelectCorrectTargetDeviceandInitizeBlobWithCorrectSize) { @@ -74,27 +82,26 @@ TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWit config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); std::shared_ptr exeNetwork; for (auto& iter : targets) { - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(HasSubstr(iter)), - ::testing::Matcher(_))) - .Times(1); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(HasSubstr(iter)), + ::testing::Matcher(_))) + .Times(1); } - EXPECT_CALL( - *core, + EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(HasSubstr("GPU")), - ::testing::Matcher(_))) - .Times(0); + ::testing::Matcher(HasSubstr("GPU")), + ::testing::Matcher(_))) + .Times(0); ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); } const std::vector testConfigs = { - DynamicOutputConfigParams {"CPU,GPU", std::vector{"CPU"}}, - DynamicOutputConfigParams {"GPU,CPU", std::vector{"CPU"}}, + DynamicOutputConfigParams{"CPU,GPU", std::vector{"CPU"}}, + DynamicOutputConfigParams{"GPU,CPU", std::vector{"CPU"}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, DynamicOutputInferenceTest, - ::testing::ValuesIn(testConfigs), - DynamicOutputInferenceTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + DynamicOutputInferenceTest, + ::testing::ValuesIn(testConfigs), + DynamicOutputInferenceTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/get_device_list.cpp b/src/plugins/auto/tests/unit/get_device_list.cpp index 5fc8d4eedb4b43..59c6babb8f19c9 100644 --- a/src/plugins/auto/tests/unit/get_device_list.cpp +++ b/src/plugins/auto/tests/unit/get_device_list.cpp @@ -10,10 +10,9 @@ using namespace ov::mock_auto_plugin; const std::vector availableDevs = {"CPU", "GPU", "NPU"}; const std::vector availableDevsWithId = {"CPU", "GPU.0", "GPU.1", "NPU"}; using Params = std::tuple; -using ConfigParams = std::tuple< - std::vector, // Available devices retrieved from Core - Params // Params {devicePriority, expect metaDevices} - >; +using ConfigParams = std::tuple, // Available devices retrieved from Core + Params // Params {devicePriority, expect metaDevices} + >; class GetDeviceListTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -36,10 +35,9 @@ class GetDeviceListTest : public tests::AutoTest, public ::testing::TestWithPara } void SetUp() override { - ON_CALL(*plugin, get_device_list).WillByDefault([this]( - const ov::AnyMap& config) { - return plugin->Plugin::get_device_list(config); - }); + ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) { + return plugin->Plugin::get_device_list(config); + }); } }; @@ -76,8 +74,8 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) { ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs)); std::string dgpuArchitecture = "GPU: vendor=0x10DE arch=0"; - ON_CALL(*core, get_property(StrEq("GPU.1"), - StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); + ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture)); EXPECT_CALL(*core, get_available_devices()).Times(1); if (metaDevices == "") { 
EXPECT_THROW(plugin->get_device_list({ov::device::priorities(priorityDevices)}), ov::Exception); @@ -88,29 +86,30 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) { } } -const std::vector testConfigsWithId = {Params{" ", " "}, - Params{"", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ", "CPU, "}, - Params{" ,CPU", " ,CPU"}, - Params{"CPU,", "CPU"}, - Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, - Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, - Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, - Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, - Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, - Params{"-GPU.0,GPU", "GPU.1"}, - Params{"-GPU,GPU.0", "GPU.0"}, - Params{"-GPU.0", "CPU,GPU.1"}, - Params{"-GPU.0,-GPU.1", "CPU"}, - Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, - Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, - Params{"-GPU.0,-GPU.1,-CPU", ""}, - Params{"GPU,-GPU.0", "GPU.1"}, - Params{"-GPU,CPU", "CPU"}, - Params{"-GPU,-CPU", ""}, - Params{"GPU.0,-GPU", "GPU.0"}, - Params{"-GPU.0,-CPU", "GPU.1"}}; +const std::vector testConfigsWithId = { + Params{" ", " "}, + Params{"", "CPU,GPU.0,GPU.1"}, + Params{"CPU, ", "CPU, "}, + Params{" ,CPU", " ,CPU"}, + Params{"CPU,", "CPU"}, + Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, + Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, + Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, + Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, + Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, + Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, + Params{"-GPU.0,GPU", "GPU.1"}, + Params{"-GPU,GPU.0", "GPU.0"}, + Params{"-GPU.0", "CPU,GPU.1"}, + Params{"-GPU.0,-GPU.1", "CPU"}, + Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, + Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, + Params{"-GPU.0,-GPU.1,-CPU", ""}, + Params{"GPU,-GPU.0", "GPU.1"}, + Params{"-GPU,CPU", "CPU"}, + Params{"-GPU,-CPU", ""}, + Params{"GPU.0,-GPU", "GPU.0"}, + Params{"-GPU.0,-CPU", "GPU.1"}}; const std::vector testConfigs = {Params{" ", " "}, Params{"", "CPU,GPU"}, @@ -139,35 +138,36 @@ const std::vector testConfigs = {Params{" ", " "}, Params{"-CPU,INVALID_DEVICE", "INVALID_DEVICE"}, Params{"CPU,GPU,NPU", "CPU,GPU,NPU"}}; -const std::vector testConfigsWithIdNotInteldGPU = {Params{" ", " "}, - Params{"", "CPU,GPU.0"}, - Params{"CPU, ", "CPU, "}, - Params{" ,CPU", " ,CPU"}, - Params{"CPU,", "CPU"}, - Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, - Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, - Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, - Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, - Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, - Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, - Params{"-GPU.0,GPU", "GPU.1"}, - Params{"-GPU,GPU.0", "GPU.0"}, - Params{"-GPU.0", "CPU"}, - Params{"-GPU.0,-GPU.1", "CPU"}, - Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, - Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, - Params{"-GPU.0,-GPU.1,-CPU", ""}, - Params{"GPU,-GPU.0", "GPU.1"}, - Params{"GPU.0,-GPU", "GPU.0"}, - Params{"GPU", "GPU.0,GPU.1"}, - Params{"GPU.0", "GPU.0"}, - Params{"GPU.1", "GPU.1"}, - Params{"-CPU", "GPU.0"}, - Params{"-CPU,-GPU", ""}, - Params{"-CPU,-GPU.0", ""}, - Params{"-CPU,-GPU.1", "GPU.0"}, - Params{"-GPU,CPU", "CPU"}, - Params{"-GPU.0,-CPU", ""}}; +const std::vector testConfigsWithIdNotInteldGPU = { + Params{" ", " "}, + Params{"", "CPU,GPU.0"}, + Params{"CPU, ", "CPU, "}, + Params{" ,CPU", " ,CPU"}, + Params{"CPU,", "CPU"}, + Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"}, + 
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"}, + Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"}, + Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"}, + Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"}, + Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"}, + Params{"-GPU.0,GPU", "GPU.1"}, + Params{"-GPU,GPU.0", "GPU.0"}, + Params{"-GPU.0", "CPU"}, + Params{"-GPU.0,-GPU.1", "CPU"}, + Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"}, + Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"}, + Params{"-GPU.0,-GPU.1,-CPU", ""}, + Params{"GPU,-GPU.0", "GPU.1"}, + Params{"GPU.0,-GPU", "GPU.0"}, + Params{"GPU", "GPU.0,GPU.1"}, + Params{"GPU.0", "GPU.0"}, + Params{"GPU.1", "GPU.1"}, + Params{"-CPU", "GPU.0"}, + Params{"-CPU,-GPU", ""}, + Params{"-CPU,-GPU.0", ""}, + Params{"-CPU,-GPU.1", "GPU.0"}, + Params{"-GPU,CPU", "CPU"}, + Params{"-GPU.0,-CPU", ""}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListWithID, GetDeviceListTest, @@ -182,8 +182,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceList, INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListNotInteldGPU, GetDeviceListTestWithNotInteldGPU, - ::testing::Combine(::testing::Values(availableDevsWithId), ::testing::ValuesIn(testConfigsWithIdNotInteldGPU)), + ::testing::Combine(::testing::Values(availableDevsWithId), + ::testing::ValuesIn(testConfigsWithIdNotInteldGPU)), GetDeviceListTestWithNotInteldGPU::getTestCaseName); -//toDo need add test for ParseMetaDevices(_, config) to check device config of -//return metaDevices +// toDo need add test for ParseMetaDevices(_, config) to check device config of +// return metaDevices diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index 1142c7d871cad0..02043f1e45a1d7 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -3,43 +3,31 @@ // #pragma once -#include #include +#include + +#include #include -#include "plugin.hpp" -#include "openvino/runtime/core.hpp" + #include "gmock_plugin.hpp" -#include "mock_common.hpp" -#include +#include "openvino/runtime/core.hpp" +#include "plugin.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp" -using ::testing::MatcherCast; -using ::testing::AllOf; -using ::testing::Throw; -using ::testing::Matches; -using ::testing::_; -using ::testing::StrEq; -using ::testing::StrNe; -using ::testing::Return; -using ::testing::Property; -using ::testing::Eq; -using ::testing::ReturnRef; -using ::testing::AtLeast; -using ::testing::AnyNumber; -using ::testing::InvokeWithoutArgs; -using ::testing::HasSubstr; -using ::testing::NiceMock; +using namespace ::testing; using namespace ov::mock_auto_plugin; -#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) EXPECT_THROW( \ - try { \ - stmt; \ - } catch (const etype& ex) { \ +#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) \ + EXPECT_THROW( \ + try { stmt; } catch (const etype& ex) { \ EXPECT_THAT(std::string(ex.what()), HasSubstr(whatstring)); \ - throw; \ - } \ - , etype) + throw; \ + }, \ + etype) // define a matcher to check if perf hint expects MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint expects.") { @@ -51,28 +39,51 @@ MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint 
expects.") { return perfHint == arg_perfHint.as(); } + +#define IE_SET_METRIC(key, name, ...) \ + typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = __VA_ARGS__; + +#define RETURN_MOCK_VALUE(value) \ + InvokeWithoutArgs([value]() { \ + return ov::Any(value); \ + }) + +// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any +// it will cause core dump, so add this special implemented +namespace testing { +namespace internal { +template <> +void PrintTo(const ov::Any& a, std::ostream* os); +} +} // namespace testing + +#define ENABLE_LOG_IN_MOCK() \ + ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \ + std::cout << stream.str() << std::endl; \ + }); + namespace ov { namespace mock_auto_plugin { namespace tests { - class BaseTest { public: - std::shared_ptr model; - std::shared_ptr> mock_plugin_cpu; - std::shared_ptr> mock_plugin_gpu; - std::shared_ptr> plugin; - //mock exeNetwork helper - ov::SoPtr mockExeNetwork; - std::shared_ptr mockIExeNet; - //mock exeNetwork actual - ov::SoPtr mockExeNetworkActual; - std::shared_ptr mockIExeNetActual; + std::shared_ptr model; + std::shared_ptr model_can_batch; + std::shared_ptr> mock_plugin_cpu; + std::shared_ptr> mock_plugin_gpu; + std::shared_ptr> plugin; + // mock exeNetwork helper + ov::SoPtr mockExeNetwork; + std::shared_ptr mockIExeNet; + // mock exeNetwork actual + ov::SoPtr mockExeNetworkActual; + std::shared_ptr mockIExeNetActual; // config for Auto device - ov::AnyMap config; - std::vector metaDevices; - std::shared_ptr inferReqInternal; - std::shared_ptr inferReqInternalActual; + ov::AnyMap config; + std::vector metaDevices; + std::shared_ptr inferReqInternal; + std::shared_ptr inferReqInternalActual; ov::Any optimalNum; virtual ~BaseTest(); @@ -84,32 +95,10 @@ class BaseTest { // for auto unit tests which can covered by mock core, or need to test with gmock icore class AutoTest : public BaseTest { public: - std::shared_ptr> core; + std::shared_ptr> core; AutoTest(); ~AutoTest(); }; - -// for unit tests which requires real core, batch support or remote context -// mock plugin name: MOCK_CPU,MOCK_HARDWARE -// please extend as needed - -class AutoTestWithRealCore : public BaseTest { -public: - AutoTestWithRealCore(); - ~AutoTestWithRealCore() = default; - ov::Core core; - -protected: - void register_plugin_simple(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); - void register_plugin_support_batch_and_context(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties); - std::vector> m_mock_contexts; - std::shared_ptr m_so; - std::shared_ptr compiled_model; - void reg_plugin(ov::Core& core, - std::shared_ptr plugin, - const std::string& device_name, - const ov::AnyMap& properties); -}; } // namespace tests } // namespace mock_auto_plugin } // namespace ov diff --git a/src/plugins/auto/tests/unit/include/gmock_plugin.hpp b/src/plugins/auto/tests/unit/include/gmock_plugin.hpp index 01d46dddf5d001..71b9f3269bb9d7 100644 --- a/src/plugins/auto/tests/unit/include/gmock_plugin.hpp +++ b/src/plugins/auto/tests/unit/include/gmock_plugin.hpp @@ -4,9 +4,11 @@ #pragma once #include + +#include + #include "openvino/runtime/core.hpp" #include "plugin.hpp" -#include using namespace ov::mock_auto_plugin; namespace ov { @@ -19,10 +21,47 @@ class MockAutoPlugin : public Plugin { get_valid_device, ((const std::vector&), const std::string&), (const, override)); - MOCK_METHOD(DeviceInformation, select_device, ((const 
std::vector&), - const std::string&, unsigned int), (override)); - MOCK_METHOD((std::vector), parse_meta_devices, - (const std::string&, const ov::AnyMap&), (const, override)); + MOCK_METHOD(DeviceInformation, + select_device, + ((const std::vector&), const std::string&, unsigned int), + (override)); + MOCK_METHOD((std::vector), + parse_meta_devices, + (const std::string&, const ov::AnyMap&), + (const, override)); +}; + +class MockISyncInferRequest : public ISyncInferRequest { +public: + MockISyncInferRequest(const std::shared_ptr& compiled_model); + MOCK_METHOD(std::vector, get_profiling_info, (), (const, override)); + MOCK_METHOD(void, infer, (), (override)); + MOCK_METHOD(std::vector>, query_state, (), (const, override)); + ~MockISyncInferRequest() = default; + +private: + void allocate_tensor_impl(ov::SoPtr& tensor, + const ov::element::Type& element_type, + const ov::Shape& shape); +}; + +class MockAsyncInferRequest : public IAsyncInferRequest { +public: + MockAsyncInferRequest(const std::shared_ptr& request, + const std::shared_ptr& task_executor, + const std::shared_ptr& callback_executor, + bool ifThrow) + : IAsyncInferRequest(request, task_executor, callback_executor), + m_throw(ifThrow) { + m_pipeline = {}; + m_pipeline.push_back({task_executor, [this] { + if (m_throw) + OPENVINO_THROW("runtime inference failure"); + }}); + } + +private: + bool m_throw; }; -} // namespace mock_auto_plugin -} // namespace ov +} // namespace mock_auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/unit/include/mock_common.hpp b/src/plugins/auto/tests/unit/include/mock_common.hpp deleted file mode 100644 index 0bbf58d68b0f53..00000000000000 --- a/src/plugins/auto/tests/unit/include/mock_common.hpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include -#include -#include "openvino/runtime/iplugin.hpp" -#include "openvino/opsets/opset11.hpp" -#include "openvino/runtime/iasync_infer_request.hpp" -#include "openvino/runtime/iplugin.hpp" -#include "openvino/runtime/iremote_context.hpp" -#include "openvino/runtime/iremote_tensor.hpp" -#include "openvino/runtime/make_tensor.hpp" - -#define IE_SET_METRIC(key, name, ...) 
\ - typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = \ - __VA_ARGS__; - -#define RETURN_MOCK_VALUE(value) \ - InvokeWithoutArgs([value](){return ov::Any(value);}) - -// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any -// it will cause core dump, so add this special implemented -namespace testing { -namespace internal { - template<> - void PrintTo(const ov::Any& a, std::ostream* os); -} -} - -#define ENABLE_LOG_IN_MOCK() \ - ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \ - std::cout << stream.str() << std::endl; \ - }); - -namespace ov { -class MockPluginBase : public ov::IPlugin { -public: - MOCK_METHOD(std::shared_ptr, compile_model, ((const std::shared_ptr&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(std::shared_ptr, compile_model, - ((const std::shared_ptr&), (const ov::AnyMap&), (const ov::SoPtr&)), (const, override)); - MOCK_METHOD(void, set_property, (const AnyMap&), (override)); - MOCK_METHOD(ov::Any, get_property, ((const std::string&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(ov::SoPtr, create_context, (const ov::AnyMap&), (const, override)); - MOCK_METHOD(ov::SoPtr, get_default_context, (const ov::AnyMap&), (const, override)); - MOCK_METHOD(std::shared_ptr, import_model, ((std::istream&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(std::shared_ptr, import_model, - ((std::istream&), (const ov::SoPtr&), (const ov::AnyMap&)), (const, override)); - MOCK_METHOD(ov::SupportedOpsMap, query_model, ((const std::shared_ptr&), (const ov::AnyMap&)), (const, override)); -}; - -class MockCompiledModel : public ICompiledModel { -public: - MockCompiledModel(const std::shared_ptr& model, const std::shared_ptr& plugin) - : ICompiledModel(model, plugin) {} - MOCK_METHOD(std::shared_ptr, create_sync_infer_request, (), (const, override)); - MOCK_METHOD(Any, get_property, (const std::string&), (const, override)); - MOCK_METHOD(void, set_property, (const AnyMap&), (override)); - MOCK_METHOD(void, export_model, (std::ostream&), (const, override)); - MOCK_METHOD(std::shared_ptr, get_runtime_model, (), (const, override)); - MOCK_METHOD(std::shared_ptr, create_infer_request, (), (const, override)); -}; - -class MockAsyncInferRequest : public IAsyncInferRequest { -public: - MockAsyncInferRequest(const std::shared_ptr& request, - const std::shared_ptr& task_executor, - const std::shared_ptr& callback_executor, - bool ifThrow); -private: - bool m_throw; -}; - -class MockSyncInferRequest : public ISyncInferRequest { -public: - MockSyncInferRequest(const std::shared_ptr& compiled_model); - MOCK_METHOD(std::vector, get_profiling_info, (), (const, override)); - //MOCK_METHOD(Tensor, get_tensor, (const Output&), (const, override)); - //MOCK_METHOD(void, set_tensor, (const Output&, const Tensor&), (override)); - //MOCK_METHOD(std::vector, get_tensors, (const Output&), (const, override)); - //MOCK_METHOD(void, set_tensors, (const Output&, const std::vector&), (override)); - MOCK_METHOD(void, infer, (), (override)); - MOCK_METHOD(std::vector>, query_state, (), (const, override)); - //MOCK_METHOD(const std::shared_ptr&, get_compiled_model, (), (const, override)); - //MOCK_METHOD(const std::vector>&, get_inputs, (), (const, override)); - //MOCK_METHOD(const std::vector>&, get_outputs, (), (const, override)); - //MOCK_METHOD(void, check_tensors, (), (const, override)); - ~MockSyncInferRequest() = default; - -private: - void allocate_tensor_impl(ov::SoPtr& tensor, const 
ov::element::Type& element_type, const ov::Shape& shape); -}; - -class MockRemoteTensor : public ov::IRemoteTensor { - ov::AnyMap m_properties; - std::string m_dev_name; - -public: - MockRemoteTensor(const std::string& name, const ov::AnyMap& props) : m_properties(props), m_dev_name(name) {} - const ov::AnyMap& get_properties() const override { - return m_properties; - } - const std::string& get_device_name() const override { - return m_dev_name; - } - void set_shape(ov::Shape shape) override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::element::Type& get_element_type() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::Shape& get_shape() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - const ov::Strides& get_strides() const override { - OPENVINO_NOT_IMPLEMENTED; - } -}; - -class MockRemoteContext : public ov::IRemoteContext { - ov::AnyMap m_property = {{"IS_DEFAULT", true}}; - std::string m_dev_name; - -public: - MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {} - const std::string& get_device_name() const override { - return m_dev_name; - } - - const ov::AnyMap& get_property() const override { - OPENVINO_NOT_IMPLEMENTED; - } - - ov::SoPtr create_tensor(const ov::element::Type& type, - const ov::Shape& shape, - const ov::AnyMap& params = {}) override { - auto remote_tensor = std::make_shared(m_dev_name, m_property); - return {remote_tensor, nullptr}; - } -}; -} // namespace ov diff --git a/src/plugins/auto/tests/unit/include/mock_log_utils.hpp b/src/plugins/auto/tests/unit/include/mock_log_utils.hpp index 80383b42ff6fc0..51b0a7f4e622ad 100644 --- a/src/plugins/auto/tests/unit/include/mock_log_utils.hpp +++ b/src/plugins/auto/tests/unit/include/mock_log_utils.hpp @@ -4,15 +4,15 @@ #pragma once #include + #include "utils/log.hpp" namespace ov { namespace mock_auto_plugin { class MockLog : public Log { public: - MOCK_METHOD(void, print, (std::stringstream& stream), (override)); - MockLog(std::string unittest):Log(unittest) { - } + MOCK_METHOD(void, print, (std::stringstream & stream), (override)); + MockLog(std::string unittest) : Log(unittest) {} static MockLog* get_instance() { if (m_mocklog == NULL) { m_mocklog = new MockLog("unittest"); @@ -27,5 +27,5 @@ class MockLog : public Log { } static MockLog* m_mocklog; }; -}// namespace mock_auto_plugin -} //namespace ov +} // namespace mock_auto_plugin +} // namespace ov diff --git a/src/plugins/auto/tests/unit/key_network_priority_test.cpp b/src/plugins/auto/tests/unit/key_network_priority_test.cpp index e284aedc6572c7..616f14040486b6 100644 --- a/src/plugins/auto/tests/unit/key_network_priority_test.cpp +++ b/src/plugins/auto/tests/unit/key_network_priority_test.cpp @@ -7,16 +7,15 @@ using Config = std::map; using namespace ov::mock_auto_plugin; -using PriorityParams = std::tuple; //{modelpriority, deviceUniquName} +using PriorityParams = std::tuple; //{modelpriority, deviceUniquName} -using ConfigParams = std::tuple< - std::string, // netPrecision - bool, // enable device priority - std::vector // {{modelpriority, expect device unique_name}} - >; +using ConfigParams = std::tuple // {{modelpriority, expect device unique_name}} + >; class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWithParam { public: - std::vector metaDevices; + std::vector metaDevices; public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -31,8 +30,8 @@ class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit result << "_enableDevicePriority_false"; } for (auto& 
item : PriorityConfigs) { - result << "_priority_" << std::get<0>(item); - result << "_return_" << std::get<1>(item); + result << "_priority_" << std::get<0>(item); + result << "_return_" << std::get<1>(item); } result << "netPrecision_" << netPrecision; return result.str(); @@ -45,9 +44,9 @@ class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWit void SetUp() override { std::tie(netPrecision, enableDevicePriority, PriorityConfigs) = GetParam(); sizeOfConfigs = static_cast(PriorityConfigs.size()); - std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; - ON_CALL(*core, get_property(HasSubstr("GPU"), - StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability)); + std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"}; + ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _)) + .WillByDefault(RETURN_MOCK_VALUE(gpuCability)); std::vector otherCability = {"INT8"}; ON_CALL(*core, get_property(HasSubstr("OTHER"), StrEq(ov::device::capabilities.name()), _)) @@ -69,14 +68,14 @@ TEST_P(KeyNetworkPriorityTest, SelectDevice) { std::vector resDevInfo; if (enableDevicePriority) { metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, - {"GPU.0", {}, 2, "01", "iGPU_01", 1}, - {"GPU.1", {}, 2, "01", "dGPU_01", 2}, - {"OTHER", {}, 2, "01", "OTHER_01", 3}}; + {"GPU.0", {}, 2, "01", "iGPU_01", 1}, + {"GPU.1", {}, 2, "01", "dGPU_01", 2}, + {"OTHER", {}, 2, "01", "OTHER_01", 3}}; } else { metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, - {"GPU.0", {}, 2, "01", "iGPU_01", 0}, - {"GPU.1", {}, 2, "01", "dGPU_01", 0}, - {"OTHER", {}, 2, "01", "OTHER_01", 0}}; + {"GPU.0", {}, 2, "01", "iGPU_01", 0}, + {"GPU.1", {}, 2, "01", "dGPU_01", 0}, + {"OTHER", {}, 2, "01", "OTHER_01", 0}}; } EXPECT_CALL(*plugin, select_device(_, _, _)).Times(sizeOfConfigs); @@ -110,16 +109,16 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) { // selectdevice in multi threads, and UnregisterPriority them all, should not affect the // Priority Map for (auto& item : PriorityConfigs) { - unsigned int priority = std::get<0>(item); - auto future = std::async(std::launch::async, [this, priority] { - auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority); - plugin->unregister_priority(priority, deviceInfo.unique_name); - }); - futureVect.push_back(std::move(future)); + unsigned int priority = std::get<0>(item); + auto future = std::async(std::launch::async, [this, priority] { + auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority); + plugin->unregister_priority(priority, deviceInfo.unique_name); + }); + futureVect.push_back(std::move(future)); } for (auto& item : futureVect) { - item.get(); + item.get(); } for (auto& item : PriorityConfigs) { @@ -138,152 +137,206 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) { // {netPrecision, enableDevicePriority, PriorityParamsVector{{modelpriority, expect device unique_name}}} const std::vector testConfigs = { - ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"FP32", false, 
{PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {4, "CPU_01"}, - PriorityParams {5, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {0, "OTHER_01"}, - PriorityParams {1, "CPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {2, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "CPU_01"}, - PriorityParams {5, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "CPU_01"}}}, - ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}, - PriorityParams {0, "dGPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "CPU_01"}, - PriorityParams {3, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"FP32", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{4, "CPU_01"}, + PriorityParams{5, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, 
"CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{2, "OTHER_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"INT8", + false, + {PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{0, "OTHER_01"}, + PriorityParams{1, "CPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{2, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "iGPU_01"}, + PriorityParams{4, "CPU_01"}, + PriorityParams{5, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "CPU_01"}}}, + ConfigParams{"BIN", + false, + {PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}, + PriorityParams{0, "dGPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "CPU_01"}, + PriorityParams{3, "CPU_01"}}}, // metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0}, // {ov::test::utils::DEVICE_GPU, {}, 2, "01", "iGPU_01", 1}, // {ov::test::utils::DEVICE_GPU, {}, 2, "01", "dGPU_01", 2}, // cpu > igpu > dgpu > OTHER - ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {2, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "dGPU_01"}}}, - ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "OTHER_01"}, - PriorityParams {4, "OTHER_01"}, - PriorityParams {5, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}}}, - ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "OTHER_01"}, - PriorityParams {2, "OTHER_01"}, - PriorityParams {3, "OTHER_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams 
{2, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {3, "iGPU_01"}, - PriorityParams {4, "dGPU_01"}, - PriorityParams {5, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {2, "iGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {2, "iGPU_01"}, - PriorityParams {3, "dGPU_01"}}}, - ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "dGPU_01"}, - PriorityParams {0, "CPU_01"}, - PriorityParams {1, "iGPU_01"}, - PriorityParams {2, "dGPU_01"}, - PriorityParams {3, "dGPU_01"}}} -}; - - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, KeyNetworkPriorityTest, - ::testing::ValuesIn(testConfigs), - KeyNetworkPriorityTest::getTestCaseName); + ConfigParams{"FP32", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "dGPU_01"}}}, + ConfigParams{"FP32", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{3, "OTHER_01"}, + PriorityParams{4, "OTHER_01"}, + PriorityParams{5, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}}}, + ConfigParams{"INT8", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{1, "OTHER_01"}, + PriorityParams{2, "OTHER_01"}, + PriorityParams{3, "OTHER_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{2, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{3, "iGPU_01"}, + PriorityParams{4, "dGPU_01"}, + PriorityParams{5, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{2, "iGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{2, "CPU_01"}, + PriorityParams{0, "CPU_01"}, + PriorityParams{2, "iGPU_01"}, + PriorityParams{3, "dGPU_01"}}}, + ConfigParams{"BIN", + true, + {PriorityParams{0, "CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "dGPU_01"}, + PriorityParams{0, 
"CPU_01"}, + PriorityParams{1, "iGPU_01"}, + PriorityParams{2, "dGPU_01"}, + PriorityParams{3, "dGPU_01"}}}}; +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + KeyNetworkPriorityTest, + ::testing::ValuesIn(testConfigs), + KeyNetworkPriorityTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/life_time_test.cpp b/src/plugins/auto/tests/unit/life_time_test.cpp new file mode 100644 index 00000000000000..a014505ddfcd58 --- /dev/null +++ b/src/plugins/auto/tests/unit/life_time_test.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "include/auto_unit_test.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp" +using namespace ov::mock_auto_plugin; + +using ConfigParams = std::tuple; + +class AutoLifeTimeTest : public tests::AutoTest, public ::testing::Test { +public: + void SetUp() override { + plugin->set_device_name("AUTO"); + mock_compiled_model = {mockIExeNetActual, std::make_shared("for test")}; + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher("GPU.0"), + _)) + .WillByDefault(Return(mock_compiled_model)); + mock_states = {ov::SoPtr(std::make_shared>(), + std::make_shared("for test"))}; + EXPECT_CALL(*inferReqInternalActual, query_state()).WillRepeatedly(Return(mock_states)); + } + + void TearDown() override { + testing::Mock::AllowLeak(mock_states.front()._ptr.get()); + testing::Mock::AllowLeak(inferReqInternalActual.get()); + } + +protected: + ov::SoPtr mock_compiled_model; + std::vector> mock_states; +}; + +TEST_F(AutoLifeTimeTest, loaded_tensor) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + for (auto& iter : request->get_inputs()) { + auto tensor = request->get_tensor(iter); + ASSERT_EQ(tensor._so, mock_compiled_model._so); + } +} + +TEST_F(AutoLifeTimeTest, loaded_states) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + auto states = request->query_state(); + auto res_so = mock_states.front()._so; + for (auto& state : states) + ASSERT_EQ(state._so, res_so); +} + +TEST_F(AutoLifeTimeTest, loaded_tensor_multi) { + plugin->set_device_name("MULTI"); + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + for (auto& iter : request->get_inputs()) { + auto tensor = request->get_tensor(iter); + ASSERT_EQ(tensor._so, mock_compiled_model._so); + } +} + +TEST_F(AutoLifeTimeTest, loaded_states_bind_buffer) { + // get Parameter + config.insert(ov::device::priorities("GPU.0")); + config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)); + config.insert(ov::intel_auto::device_bind_buffer(true)); + std::shared_ptr compiled_model; + ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config)); + auto request = compiled_model->create_infer_request(); + auto states = request->query_state(); + auto res_so = mock_states.front()._so; + for (auto& state : states) + ASSERT_EQ(state._so, res_so); +} \ No newline at end of file diff --git 
a/src/plugins/auto/tests/unit/log_utils_format_test.cpp b/src/plugins/auto/tests/unit/log_utils_format_test.cpp index 74d6cd9f09317e..a7a8498affee69 100644 --- a/src/plugins/auto/tests/unit/log_utils_format_test.cpp +++ b/src/plugins/auto/tests/unit/log_utils_format_test.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // - -#include #include -#include "utils/log_util.hpp" +#include + #include + +#include "utils/log_util.hpp" using namespace ov::mock_auto_plugin; using ::testing::_; class LogUtilsFormatTest : public ::testing::Test { @@ -19,7 +20,7 @@ class LogUtilsFormatTest : public ::testing::Test { MockLog::release(); } - void traceCallStacksTest(){ + void traceCallStacksTest() { TraceCallStacks("test"); } }; @@ -34,8 +35,8 @@ TEST_F(LogUtilsFormatTest, format_s) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%sabc", "DEBUG"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -45,8 +46,8 @@ TEST_F(LogUtilsFormatTest, format_d) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%dabc", -1); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -57,8 +58,8 @@ TEST_F(LogUtilsFormatTest, format_ld) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%ldabc", -3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -69,8 +70,8 @@ TEST_F(LogUtilsFormatTest, format_u) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%uabc", 1); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -81,8 +82,8 @@ TEST_F(LogUtilsFormatTest, format_lu) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%luabc", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -93,8 +94,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s,%d,%ld,%u,%lu,abc", "DEBUG", -1, -3, 1, 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -105,8 +106,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu2) { std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s%d%ld%u%luabc", "DEBUG", -1, -3, 1, 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -117,8 +118,8 @@ TEST_F(LogUtilsFormatTest, format_lf) { 
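// Shared shape of the LogUtilsFormatTest cases: ON_CALL on the mocked Log::print captures the
// rendered message into printResult, and the regex check then looks either for the formatted
// payload (e.g. "abc" for supported specifiers such as %s/%d/%lu/%lf) or for the "not valid"
// marker the logger prints when it meets a specifier it does not handle (%p, %x, %e, ...).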
std::string pattern{"abc"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%lfabc", 1.33); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -129,8 +130,8 @@ TEST_F(LogUtilsFormatTest, format_p) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%p", MockLog::m_mocklog); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -141,8 +142,8 @@ TEST_F(LogUtilsFormatTest, format_x) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%x", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -153,8 +154,8 @@ TEST_F(LogUtilsFormatTest, format_X) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%X", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -165,8 +166,8 @@ TEST_F(LogUtilsFormatTest, format_o) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%o", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -177,8 +178,8 @@ TEST_F(LogUtilsFormatTest, format_e) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%e", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -189,8 +190,8 @@ TEST_F(LogUtilsFormatTest, format_E) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%E", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -201,8 +202,8 @@ TEST_F(LogUtilsFormatTest, format_f) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%f", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -213,8 +214,8 @@ TEST_F(LogUtilsFormatTest, format_F) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%F", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -225,8 +226,8 @@ TEST_F(LogUtilsFormatTest, format_g) { std::string pattern{"not valid"}; std::regex regex(pattern); 
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%g", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -237,21 +238,20 @@ TEST_F(LogUtilsFormatTest, format_G) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%G", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); } - TEST_F(LogUtilsFormatTest, format_a) { std::string printResult = ""; std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%a", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -262,8 +262,8 @@ TEST_F(LogUtilsFormatTest, format_A) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%A", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -274,8 +274,8 @@ TEST_F(LogUtilsFormatTest, format_c) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%c", 3); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -287,8 +287,8 @@ TEST_F(LogUtilsFormatTest, format_n) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%n", &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -299,8 +299,8 @@ TEST_F(LogUtilsFormatTest, format__) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%%"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -311,8 +311,8 @@ TEST_F(LogUtilsFormatTest, format_s__) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%s%%", "DEBUG"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -324,8 +324,8 @@ TEST_F(LogUtilsFormatTest, format_dn) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("%d%n", num, &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -337,8 +337,8 @@ TEST_F(LogUtilsFormatTest, format_ccccdn) { std::string pattern{"not valid"}; std::regex regex(pattern); ON_CALL(*(HLogger), 
print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("cccc%d%n", num, &num); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -349,8 +349,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_error) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]ERROR\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_ERROR("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -361,8 +361,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_warning) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]W\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_WARNING("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -373,8 +373,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_info) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]I\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_INFO("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -385,8 +385,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_debug) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]D\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_DEBUG("test"); EXPECT_TRUE(std::regex_search(printResult, regex)); @@ -398,8 +398,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_trace) { std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]T\\[.+:[0-9]+\\].*"}; std::regex regex(pattern); ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { - printResult = stream.str(); - }); + printResult = stream.str(); + }); EXPECT_CALL(*(HLogger), print(_)).Times(1); LOG_TRACE(true, "test", "TRACE"); EXPECT_TRUE(std::regex_search(printResult, regex)); diff --git a/src/plugins/auto/tests/unit/log_utils_test.cpp b/src/plugins/auto/tests/unit/log_utils_test.cpp index 53dc7c64fec4d6..6a9b154225d377 100644 --- a/src/plugins/auto/tests/unit/log_utils_test.cpp +++ b/src/plugins/auto/tests/unit/log_utils_test.cpp @@ -2,31 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // - -#include #include -#include "utils/log_util.hpp" +#include + #include + +#include "utils/log_util.hpp" using ::testing::_; using namespace ov::mock_auto_plugin; // disable using windows.h #if 0 -#if defined(_WIN32) -#include -#elif defined(__linux__) -#include -#elif defined(__APPLE__) -#include -#else -#endif +# if defined(_WIN32) +# include +# elif defined(__linux__) +# include +# elif defined(__APPLE__) +# include +# else +# endif #endif MockLog* MockLog::m_mocklog = NULL; -using ConfigParams = std::tuple< - std::string, // logLevel - std::string, // envlogLevel - int // expectCallNum - >; +using ConfigParams = std::tuple; class LogUtilsTest : public ::testing::TestWithParam { public: std::string _logLevel; @@ -40,21 +40,20 @@ class 
LogUtilsTest : public ::testing::TestWithParam { int expectCallNum; std::tie(logLevel, envLogLevel, expectCallNum) = obj.param; std::ostringstream result; - result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum - << "envlogLevel" << envLogLevel; + result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum << "envlogLevel" << envLogLevel; return result.str(); } #if 0 void SetTestEnv(std::string key, std::string value) { -#ifdef WIN32 +# ifdef WIN32 SetEnvironmentVariable(key.c_str(), value.c_str()); -#elif defined(__linux__) +# elif defined(__linux__) ::setenv(key.c_str(), value.c_str(), true); -#elif defined(__APPLE__) +# elif defined(__APPLE__) ::setenv(key.c_str(), value.c_str(), true); -#else -#endif +# else +# endif } #endif void SetUp() override { @@ -88,9 +87,10 @@ TEST_P(LogUtilsTest, set_log_level) { TEST_P(LogUtilsTest, INFO_RUN) { set_log_level(_logLevel); int a = 0; - INFO_RUN([&a](){a++;}); - if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" || - _logLevel == "LOG_TRACE") { + INFO_RUN([&a]() { + a++; + }); + if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") { EXPECT_EQ(a, 1); } else { EXPECT_EQ(a, 0); @@ -100,7 +100,9 @@ TEST_P(LogUtilsTest, INFO_RUN) { TEST_P(LogUtilsTest, DEBUG_RUN) { set_log_level(_logLevel); int a = 0; - DEBUG_RUN([&a](){a++;}); + DEBUG_RUN([&a]() { + a++; + }); if (_logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") { EXPECT_EQ(a, 1); } else { @@ -117,10 +119,10 @@ TEST_P(LogUtilsTest, setEnvNotAffectset_log_level) { } #endif -//can not test ENV case. because of the ENV variable is readed at the -//beginning of test application and modify it in runtime is not valid -//still need to test it in different platform manully -//TEST_P(LogUtilsTest, setEnvLogLevel) { +// can not test ENV case. 
because of the ENV variable is readed at the +// beginning of test application and modify it in runtime is not valid +// still need to test it in different platform manully +// TEST_P(LogUtilsTest, setEnvLogLevel) { // SetTestEnv("AUTO_LOG_LEVEL", _envLogLevel); // EXPECT_CALL(*(HLogger), print(_)).Times(_expectCallNum); // printLog(); @@ -132,8 +134,8 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) { std::shared_ptr instanceVector[20]; for (unsigned int i = 0; i < 20; i++) { auto future = std::async(std::launch::async, [&instanceVector, i] { - instanceVector[i] = Log::instance(); - }); + instanceVector[i] = Log::instance(); + }); futureVect.push_back(std::move(future)); } @@ -143,20 +145,19 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) { for (unsigned int i = 0; i < 19; i++) { EXPECT_NE(instanceVector[i].get(), nullptr); - EXPECT_EQ(instanceVector[i].get(), instanceVector[i+1].get()); + EXPECT_EQ(instanceVector[i].get(), instanceVector[i + 1].get()); } } -const std::vector testConfigs = -{ConfigParams {"LOG_NONE", "0", 0}, - ConfigParams {"LOG_NONE", "1", 0}, - ConfigParams {"LOG_ERROR", "2", 2}, - ConfigParams {"LOG_WARNING", "3", 4}, - ConfigParams {"LOG_INFO", "4", 6}, - ConfigParams {"LOG_DEBUG", "5", 8}, - ConfigParams {"LOG_TRACE", "6", 10}}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, LogUtilsTest, - ::testing::ValuesIn(testConfigs), - LogUtilsTest::getTestCaseName); - +const std::vector testConfigs = {ConfigParams{"LOG_NONE", "0", 0}, + ConfigParams{"LOG_NONE", "1", 0}, + ConfigParams{"LOG_ERROR", "2", 2}, + ConfigParams{"LOG_WARNING", "3", 4}, + ConfigParams{"LOG_INFO", "4", 6}, + ConfigParams{"LOG_DEBUG", "5", 8}, + ConfigParams{"LOG_TRACE", "6", 10}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + LogUtilsTest, + ::testing::ValuesIn(testConfigs), + LogUtilsTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/mock_common.cpp b/src/plugins/auto/tests/unit/mock_common.cpp deleted file mode 100644 index 122fe8b9ecc58a..00000000000000 --- a/src/plugins/auto/tests/unit/mock_common.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/mock_common.hpp" -#include "openvino/runtime/make_tensor.hpp" - -// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any -// it will cause core dump, so add this special implemented -namespace testing { -namespace internal { - template<> - void PrintTo(const ov::Any& a, std::ostream* os) { - *os << "using custom PrintTo ov::Any"; - } -} -} - -namespace ov { -MockAsyncInferRequest::MockAsyncInferRequest(const std::shared_ptr& request, - const std::shared_ptr& task_executor, - const std::shared_ptr& callback_executor, - bool ifThrow) - : IAsyncInferRequest(request, task_executor, callback_executor), m_throw(ifThrow) { - m_pipeline = {}; - m_pipeline.push_back({task_executor, - [this] { - if (m_throw) - OPENVINO_THROW("runtime inference failure"); - } }); -} - -void MockSyncInferRequest::allocate_tensor_impl(ov::SoPtr& tensor, const element::Type& element_type, const Shape& shape) { - if (!tensor || tensor->get_element_type() != element_type) { - tensor = ov::make_tensor(element_type, shape); - } else { - tensor->set_shape(shape); - } -} - -MockSyncInferRequest::MockSyncInferRequest(const std::shared_ptr& compiled_model) - : ov::ISyncInferRequest(compiled_model) { - OPENVINO_ASSERT(compiled_model); - // Allocate input/output tensors - for (const auto& input : get_inputs()) { - allocate_tensor(input, 
[this, input](ov::SoPtr& tensor) { - // Can add a check to avoid double work in case of shared tensors - allocate_tensor_impl(tensor, - input.get_element_type(), - input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape()); - }); - } - for (const auto& output : get_outputs()) { - allocate_tensor(output, [this, output](ov::SoPtr& tensor) { - // Can add a check to avoid double work in case of shared tensors - allocate_tensor_impl(tensor, - output.get_element_type(), - output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape()); - }); - } -} -} //namespace ov diff --git a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp index 422eff1a8d325a..42b6d3de2ca97e 100644 --- a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp +++ b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp @@ -38,18 +38,18 @@ class ParseMetaDeviceTest : public tests::AutoTest, public ::testing::TestWithPa void SetUp() override { ON_CALL(*core, get_supported_property(StrEq("INVALID_DEVICE"), _)).WillByDefault(Throw(ov::Exception(""))); ON_CALL(*core, get_property(StrEq("GPU.2"), ov::supported_properties.name(), _)) - .WillByDefault(Throw(ov::Exception(""))); - ON_CALL(*plugin, parse_meta_devices).WillByDefault([this](const std::string& priorityDevices, - const ov::AnyMap& config) { - return plugin->Plugin::parse_meta_devices(priorityDevices, config); - }); - std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam(); + .WillByDefault(Throw(ov::Exception(""))); + ON_CALL(*plugin, parse_meta_devices) + .WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) { + return plugin->Plugin::parse_meta_devices(priorityDevices, config); + }); + std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam(); } void compare(std::vector& result, std::vector& expect) { EXPECT_EQ(result.size(), expect.size()); if (result.size() == expect.size()) { - for (unsigned int i = 0 ; i < result.size(); i++) { + for (unsigned int i = 0; i < result.size(); i++) { EXPECT_EQ(result[i].device_name, expect[i].device_name); EXPECT_EQ(result[i].unique_name, expect[i].unique_name); EXPECT_EQ(result[i].num_requests_per_devices, expect[i].num_requests_per_devices); @@ -61,7 +61,7 @@ class ParseMetaDeviceTest : public tests::AutoTest, public ::testing::TestWithPa void compareDevicePriority(std::vector& result, std::vector& expect) { EXPECT_EQ(result.size(), expect.size()); if (result.size() == expect.size()) { - for (unsigned int i = 0 ; i < result.size(); i++) { + for (unsigned int i = 0; i < result.size(); i++) { EXPECT_EQ(result[i].device_priority, expect[i].device_priority); } } @@ -84,9 +84,9 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesWithPriority) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); - compare(result, metaDevices); - compareDevicePriority(result, metaDevices); + auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); + compare(result, metaDevices); + compareDevicePriority(result, metaDevices); } } @@ -97,16 +97,16 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesNotWithPriority) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {}); - compare(result, metaDevices); 
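For readers unfamiliar with the ON_CALL(...).WillByDefault(...) idiom used in ParseMetaDeviceTest::SetUp above — the mocked parse_meta_devices is forwarded to the real Plugin::parse_meta_devices, so the production parsing logic still runs while gmock keeps its call bookkeeping — the following is a minimal, self-contained sketch of the same "delegate to the real implementation" pattern. The Parser/MockParser names are hypothetical stand-ins used only to illustrate the idiom; they are not part of this patch.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <string>

// Hypothetical base class standing in for the real Plugin.
struct Parser {
    virtual ~Parser() = default;
    virtual int parse(const std::string& devices) {
        return static_cast<int>(devices.size());  // pretend "real" parsing work
    }
};

struct MockParser : public Parser {
    MOCK_METHOD(int, parse, (const std::string&), (override));
};

TEST(DelegateToRealImpl, MockForwardsToBase) {
    ::testing::NiceMock<MockParser> mock;
    // Forward the mocked call to the real base implementation, mirroring
    // ON_CALL(*plugin, parse_meta_devices).WillByDefault(... Plugin::parse_meta_devices ...).
    ON_CALL(mock, parse).WillByDefault([&mock](const std::string& devices) {
        return mock.Parser::parse(devices);
    });
    EXPECT_CALL(mock, parse).Times(1);
    EXPECT_EQ(mock.parse("CPU,GPU"), 7);
}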
- for (unsigned int i = 0 ; i < result.size(); i++) { - EXPECT_EQ(result[i].device_priority, 0); - } - auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")}); - compare(result2, metaDevices); - for (unsigned int i = 0 ; i < result.size(); i++) { - EXPECT_EQ(result2[i].device_priority, 0); - } + auto result = plugin->parse_meta_devices(priorityDevices, {}); + compare(result, metaDevices); + for (unsigned int i = 0; i < result.size(); i++) { + EXPECT_EQ(result[i].device_priority, 0); + } + auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")}); + compare(result2, metaDevices); + for (unsigned int i = 0; i < result.size(); i++) { + EXPECT_EQ(result2[i].device_priority, 0); + } } } @@ -119,9 +119,9 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) { if (throwException) { ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {})); } else { - auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); - compare(result, metaDevices); - compareDevicePriority(result, metaDevices); + auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)}); + compare(result, metaDevices); + compareDevicePriority(result, metaDevices); } } // ConfigParams details @@ -129,11 +129,7 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) { // ConfigParams {devicePriority, expect metaDevices, ifThrowException} const std::vector testConfigs = { - ConfigParams{"CPU,GPU.2,OTHER", - {{"CPU", {}, -1, "", "CPU_", 0}, - {"OTHER", {}, -1, "", "OTHER_", 2}}, - false, - 3}, + ConfigParams{"CPU,GPU.2,OTHER", {{"CPU", {}, -1, "", "CPU_", 0}, {"OTHER", {}, -1, "", "OTHER_", 2}}, false, 3}, ConfigParams{"CPU,GPU,OTHER", {{"CPU", {}, -1, "", "CPU_", 0}, {"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1}, @@ -189,13 +185,15 @@ const std::vector testConfigsNoID = { 3}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceTest, - ::testing::ValuesIn(testConfigs), - ParseMetaDeviceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ParseMetaDeviceTest, + ::testing::ValuesIn(testConfigs), + ParseMetaDeviceTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceNoIDTest, - ::testing::ValuesIn(testConfigsNoID), - ParseMetaDeviceTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + ParseMetaDeviceNoIDTest, + ::testing::ValuesIn(testConfigsNoID), + ParseMetaDeviceTest::getTestCaseName); -//toDo need add test for ParseMetaDevices(_, config) to check device config of -//return metaDevices +// toDo need add test for ParseMetaDevices(_, config) to check device config of +// return metaDevices diff --git a/src/plugins/auto/tests/unit/property_test.cpp b/src/plugins/auto/tests/unit/property_test.cpp deleted file mode 100644 index 0639830f7357fd..00000000000000 --- a/src/plugins/auto/tests/unit/property_test.cpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "include/auto_unit_test.hpp" -using namespace ov::mock_auto_plugin::tests; - -class MultiPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test { -public: - void SetUp() override { - plugin->set_device_name("MULTI"); - std::shared_ptr base_plugin = plugin; - reg_plugin(core, base_plugin, "MOCK_MULTI", {}); - // validate mock plugin - core.get_property("MOCK_MULTI", ov::supported_properties); - } -}; - -class 
AutoPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test { -public: - void SetUp() override { - plugin->set_device_name("AUTO"); - std::shared_ptr base_plugin = plugin; - reg_plugin(core, base_plugin, "MOCK_AUTO", {}); - core.get_property("MOCK_AUTO", ov::supported_properties); - } -}; - - -/* to be enabled if expect multi throw for latency mode -TEST_F(PropertyTest, tputmodeonly_for_multi) { - EXPECT_THROW_WITH_MESSAGE(core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception, - "MULTI does not support perf mode"); - ASSERT_NO_THROW(compiled_model = core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU"))); - EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(PropertyTest, tputmodeonly_for_multi_propertyset) { - ASSERT_NO_THROW(core.get_property("MULTI", ov::supported_properties)); - EXPECT_THROW_WITH_MESSAGE(core.set_property("MULTI", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception, - "MULTI does not support perf mode"); -} -*/ -/* -TEST_F(PropertyTest, default_perfmode_for_auto) { - ov::CompiledModel compiled_model; - EXPECT_NO_THROW(compiled_model = core.compile_model(model, "AUTO", ov::device::priorities("MOCK_GPU", "MOCK_CPU"))); - EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY); -} -*/ - -TEST_F(MultiPropertyTest, default_perfmode_for_multi) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(MultiPropertyTest, respect_secondary_property) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - {"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:LATENCY}"}})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT); -} - -TEST_F(AutoPropertyTest, default_perfmode_for_auto_ctput) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); -} - -TEST_F(AutoPropertyTest, default_perfmode_for_auto) { - EXPECT_CALL(*mock_plugin_cpu.get(), 
compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::LATENCY); -} - -TEST_F(AutoPropertyTest, respect_secondary_property_auto_ctput) { - EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("LATENCY")))).Times(1); - EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ComparePerfHint("THROUGHPUT")))).Times(1); - ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - {"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:THROUGHPUT}"}})); - EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT); -} \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/release_helper_test.cpp b/src/plugins/auto/tests/unit/release_helper_test.cpp index c90139bdd8f244..77f35a9cefe9f1 100644 --- a/src/plugins/auto/tests/unit/release_helper_test.cpp +++ b/src/plugins/auto/tests/unit/release_helper_test.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include -#include "common_test_utils/test_constants.hpp" #include "include/auto_unit_test.hpp" using Config = std::map; using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple< - bool, // cpu load success - bool // hw device load success - >; +using ConfigParams = std::tuple; class AutoReleaseHelperTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -21,7 +20,7 @@ class AutoReleaseHelperTest : public tests::AutoTest, public ::testing::TestWith bool accSuccess; std::tie(cpuSuccess, accSuccess) = obj.param; std::ostringstream result; - if (!cpuSuccess) { + if (!cpuSuccess) { result << "cpuLoadFailure_"; } else { result << "cpuLoadSuccess_"; @@ -43,33 +42,42 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { size_t decreaseCount = 0; // test auto plugin plugin->set_device_name("AUTO"); - const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") + - ov::test::utils::DEVICE_CPU; + const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU; if (accSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), _)) - .WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - OPENVINO_THROW(""); - return 
mockExeNetworkActual; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_GPU)), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + OPENVINO_THROW(""); + return mockExeNetworkActual; + })); } if (cpuSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Return(mockExeNetwork)); if (accSuccess) decreaseCount++; } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), _)) - .WillByDefault(Throw(InferenceEngine::GeneralError{""})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + _)) + .WillByDefault(Throw(InferenceEngine::GeneralError{""})); } metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; DeviceInformation devInfo; @@ -80,15 +88,16 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { return devices; }); ON_CALL(*plugin, select_device(Property(&std::vector::size, Eq(2)), _, _)) - .WillByDefault(Return(metaDevices[1])); + .WillByDefault(Return(metaDevices[1])); ON_CALL(*plugin, select_device(Property(&std::vector::size, Eq(1)), _, _)) - .WillByDefault(Return(metaDevices[0])); + .WillByDefault(Return(metaDevices[0])); config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + ov::test::utils::DEVICE_GPU)); std::shared_ptr exeNetwork; if (cpuSuccess || accSuccess) { ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config)); if (!cpuSuccess) - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_GPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_GPU); else EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), "(CPU)"); } else { @@ -101,19 +110,21 @@ TEST_P(AutoReleaseHelperTest, releaseResource) { EXPECT_EQ(inferReqInternal.use_count(), requestsharedcount - decreaseCount); if (cpuSuccess || accSuccess) { if (accSuccess) - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_GPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_GPU); else - EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), ov::test::utils::DEVICE_CPU); + EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as(), + ov::test::utils::DEVICE_CPU); } } // -const std::vector testConfigs = {ConfigParams {true, true}, - ConfigParams {true, false}, - ConfigParams {false, true}, - ConfigParams {false, false} - }; +const std::vector testConfigs = {ConfigParams{true, true}, + ConfigParams{true, false}, + ConfigParams{false, true}, + ConfigParams{false, false}}; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoReleaseHelperTest, - ::testing::ValuesIn(testConfigs), - AutoReleaseHelperTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + AutoReleaseHelperTest, + ::testing::ValuesIn(testConfigs), + AutoReleaseHelperTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp index 
12158884c3fd64..bf837b7aaf4dcf 100644 --- a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp +++ b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp @@ -5,29 +5,28 @@ #include #include "include/auto_unit_test.hpp" -#include "openvino/runtime/threading/immediate_executor.hpp" #include "openvino/runtime/auto/properties.hpp" +#include "openvino/runtime/threading/immediate_executor.hpp" using namespace ov::mock_auto_plugin; using ConfigParams = std::tuple>, int, bool, bool, bool, bool>; -class AutoRuntimeFallback : public tests::AutoTest, - public ::testing::TestWithParam { +class AutoRuntimeFallback : public tests::AutoTest, public ::testing::TestWithParam { public: - ov::SoPtr mockExeNetworkGPU_1; - ov::SoPtr mockExeNetworkOTHER; + ov::SoPtr mockExeNetworkGPU_1; + ov::SoPtr mockExeNetworkOTHER; - std::shared_ptr> inferReqInternalGPU_1; - std::shared_ptr> inferReqInternalOTHER; + std::shared_ptr> inferReqInternalGPU_1; + std::shared_ptr> inferReqInternalOTHER; - std::shared_ptr> mockIExeNetGPU_1; - std::shared_ptr> mockIExeNetOTHER; + std::shared_ptr> mockIExeNetGPU_1; + std::shared_ptr> mockIExeNetOTHER; - std::shared_ptr mockInferrequest; - std::shared_ptr mockInferrequestGPU_0; - std::shared_ptr mockInferrequestGPU_1; - std::shared_ptr mockInferrequestOTHER; + std::shared_ptr mockInferrequest; + std::shared_ptr mockInferrequestGPU_0; + std::shared_ptr mockInferrequestGPU_1; + std::shared_ptr mockInferrequestOTHER; std::shared_ptr mockExecutor; std::shared_ptr mockExecutorGPU_0; @@ -42,7 +41,12 @@ class AutoRuntimeFallback : public tests::AutoTest, bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = obj.param; + std::tie(targetDevices, + loadNetworkNum, + enableRumtimeFallback, + expectThrow, + loadNetworkFail, + generateWorkersFail) = obj.param; std::ostringstream result; result << "auto_runtime_fallback_"; for (auto deviceInfo : targetDevices) { @@ -82,40 +86,59 @@ class AutoRuntimeFallback : public tests::AutoTest, void SetUp() override { // prepare extra mockExeNetwork - mockIExeNetGPU_1 = std::make_shared>(model, plugin); + mockIExeNetGPU_1 = std::make_shared>(model, plugin); mockExeNetworkGPU_1 = {mockIExeNetGPU_1, {}}; - mockIExeNetOTHER = std::make_shared>(model, plugin); + mockIExeNetOTHER = std::make_shared>(model, plugin); mockExeNetworkOTHER = {mockIExeNetOTHER, {}}; - + ON_CALL(*mockIExeNetGPU_1.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetGPU_1.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + ON_CALL(*mockIExeNetOTHER.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mockIExeNetOTHER.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); // prepare mockicore and cnnNetwork for loading - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.0")), _)).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkActual; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), _)).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkGPU_1; })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("OTHER")), _)).WillByDefault(InvokeWithoutArgs([this]() { - 
std::this_thread::sleep_for(std::chrono::milliseconds(200)); - return mockExeNetworkOTHER; })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.0")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkActual; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkGPU_1; + })); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("OTHER")), + _)) + .WillByDefault(InvokeWithoutArgs([this]() { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + return mockExeNetworkOTHER; + })); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), - (_))).WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(ov::test::utils::DEVICE_CPU)), + (_))) + .WillByDefault(Return(mockExeNetwork)); mockExecutor = std::make_shared(); mockExecutorGPU_0 = std::make_shared(); - inferReqInternalGPU_1 = std::make_shared>(mockIExeNetGPU_1); + inferReqInternalGPU_1 = + std::make_shared>(mockIExeNetGPU_1); mockExecutorGPU_1 = std::make_shared(); ON_CALL(*mockIExeNetGPU_1, get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) - .WillByDefault(Return(optimalNum)); + .WillByDefault(Return(optimalNum)); - inferReqInternalOTHER = std::make_shared>(mockIExeNetOTHER); + inferReqInternalOTHER = + std::make_shared>(mockIExeNetOTHER); mockExecutorOTHER = std::make_shared(); ON_CALL(*mockIExeNetOTHER, get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) .WillByDefault(Return(optimalNum)); @@ -132,11 +155,14 @@ TEST_P(AutoRuntimeFallback, releaseResource) { bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam(); + std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = + this->GetParam(); if (loadNetworkFail) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), - _)).WillByDefault(Throw(ov::Exception{"compile model error"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(Throw(ov::Exception{"compile model error"})); } for (auto& deviceInfo : targetDevices) { std::string deviceName; @@ -145,30 +171,45 @@ TEST_P(AutoRuntimeFallback, releaseResource) { targetDev += deviceName; targetDev += ((deviceInfo == targetDevices.back()) ? 
"" : ","); if (deviceName == "CPU") { - mockInferrequest = std::make_shared( - inferReqInternal, mockExecutor, nullptr, ifThrow); + mockInferrequest = std::make_shared(inferReqInternal, + mockExecutor, + nullptr, + ifThrow); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest)); } else if (deviceName == "GPU.0") { - mockInferrequestGPU_0 = std::make_shared( - inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow); + mockInferrequestGPU_0 = + std::make_shared(inferReqInternalActual, + mockExecutorGPU_0, + nullptr, + ifThrow); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_0; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_0; + })); } else if (deviceName == "GPU.1") { if (generateWorkersFail) { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"})); } else { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_1; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_1; + })); } } else if (deviceName == "OTHER") { - mockInferrequestOTHER = - std::make_shared(inferReqInternalOTHER, mockExecutorOTHER, nullptr, ifThrow); + mockInferrequestOTHER = std::make_shared(inferReqInternalOTHER, + mockExecutorOTHER, + nullptr, + ifThrow); ON_CALL(*mockIExeNetOTHER.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { std::this_thread::sleep_for(std::chrono::milliseconds(0)); return mockInferrequestOTHER; @@ -185,8 +226,8 @@ TEST_P(AutoRuntimeFallback, releaseResource) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ::testing::Matcher(_), + ::testing::Matcher(_))) .Times(loadNetworkNum); std::shared_ptr exeNetwork; @@ -206,10 +247,10 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, true, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, true, false, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, true, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, true, true, false, false}, // 3 devices @@ -217,11 +258,11 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 3, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 3, 
true, true, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, true, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 3, true, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 3, true, true, false, false}, @@ -230,10 +271,10 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, false, false, false, false}, ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, false, false, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, false, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, false, true, false, false}, // 3 devices @@ -241,11 +282,11 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 1, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 1, false, true, false, false}, - //CPU_HELP does not throw + // CPU_HELP does not throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, false, false, false, false}, - //CPU_HELP throw + // CPU_HELP throw ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false}, ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 2, false, true, false, false}, @@ -254,23 +295,27 @@ const std::vector testConfigs = { ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 3, true, false, false, true}, }; -INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, AutoRuntimeFallback, - ::testing::ValuesIn(testConfigs), - AutoRuntimeFallback::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, + AutoRuntimeFallback, + ::testing::ValuesIn(testConfigs), + AutoRuntimeFallback::getTestCaseName); TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { std::string targetDev; - std::vector> targetDevices; //std::tuple + std::vector> targetDevices; // std::tuple int loadNetworkNum; bool enableRumtimeFallback; bool expectThrow; bool loadNetworkFail; bool generateWorkersFail; - std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam(); + std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = + this->GetParam(); if (loadNetworkFail) { - ON_CALL(*core, 
compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq("GPU.1")), - _)).WillByDefault(Throw(ov::Exception{"compile model error"})); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq("GPU.1")), + _)) + .WillByDefault(Throw(ov::Exception{"compile model error"})); } for (auto& deviceInfo : targetDevices) { std::string deviceName; @@ -279,26 +324,39 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { targetDev += deviceName; targetDev += ((deviceInfo == targetDevices.back()) ? "" : ","); if (deviceName == "CPU") { - mockInferrequest = std::make_shared( - inferReqInternal, mockExecutor, nullptr, ifThrow); + mockInferrequest = std::make_shared(inferReqInternal, + mockExecutor, + nullptr, + ifThrow); ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest)); } else if (deviceName == "GPU.0") { - mockInferrequestGPU_0 = std::make_shared( - inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow); + mockInferrequestGPU_0 = + std::make_shared(inferReqInternalActual, + mockExecutorGPU_0, + nullptr, + ifThrow); ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_0; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_0; + })); } else if (deviceName == "GPU.1") { if (generateWorkersFail) { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"})); } else { - mockInferrequestGPU_1 = std::make_shared( - inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow); + mockInferrequestGPU_1 = + std::make_shared(inferReqInternalGPU_1, + mockExecutorGPU_1, + nullptr, + ifThrow); ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() { - std::this_thread::sleep_for(std::chrono::milliseconds(0)); - return mockInferrequestGPU_1; })); + std::this_thread::sleep_for(std::chrono::milliseconds(0)); + return mockInferrequestGPU_1; + })); } } } @@ -311,8 +369,8 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) { EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ::testing::Matcher(_), + ::testing::Matcher(_))) .Times(loadNetworkNum); std::shared_ptr exeNetwork; diff --git a/src/plugins/auto/tests/unit/select_device_failed_test.cpp b/src/plugins/auto/tests/unit/select_device_failed_test.cpp index 92afffef4b6f82..b1a74a7113e61a 100644 --- a/src/plugins/auto/tests/unit/select_device_failed_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_failed_test.cpp @@ -15,18 +15,16 @@ enum MODEL { THROUGHPUT = 2, }; -using ConfigParams = std::tuple< - bool, // if can continue to run - bool, // if select throw exception - MODEL, // config model general, latency, throughput - std::vector, // {device, loadSuccess} - unsigned int, // select count - unsigned int, // load count - unsigned int // load device success count - >; +using ConfigParams = std::tuple, // {device, loadSuccess} + unsigned int, // select count + unsigned int, // load count + unsigned int // load device success count + >; -class AutoLoadFailedTest : public tests::AutoTest, - public ::testing::TestWithParam { +class 
AutoLoadFailedTest : public tests::AutoTest, public ::testing::TestWithParam { public: static std::string getTestCaseName(testing::TestParamInfo obj) { unsigned int selectCount; @@ -36,8 +34,8 @@ class AutoLoadFailedTest : public tests::AutoTest, bool continueRun; bool thrExcWheSelect; MODEL configModel; - std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, - selectCount, loadCount, loadSuccessCount) = obj.param; + std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) = + obj.param; std::ostringstream result; for (auto& item : deviceConfigs) { if (std::get<1>(item)) { @@ -53,22 +51,21 @@ class AutoLoadFailedTest : public tests::AutoTest, } switch (configModel) { - case GENERAL: - result << "GENERAL"; - break; - case LATENCY: - result << "LATENCY"; - break; - case THROUGHPUT: - result << "THROUGHPUT"; - break; - default: - LOG_ERROR("should not come here"); - break; + case GENERAL: + result << "GENERAL"; + break; + case LATENCY: + result << "LATENCY"; + break; + case THROUGHPUT: + result << "THROUGHPUT"; + break; + default: + LOG_ERROR("should not come here"); + break; } - result << "select_" << selectCount << "_loadCount_" - << loadCount << "_loadSuccessCount_" << loadSuccessCount; + result << "select_" << selectCount << "_loadCount_" << loadCount << "_loadSuccessCount_" << loadSuccessCount; return result.str(); } void SetUp() override { @@ -87,8 +84,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { bool continueRun; bool thrExcWheSelect; MODEL configModel; - std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, - loadCount, loadSuccessCount) = this->GetParam(); + std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) = + this->GetParam(); // test auto plugin plugin->set_device_name("AUTO"); @@ -99,30 +96,37 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { bool loadSuccess = std::get<1>(*iter); // accoding to device loading config, set if the loading will successful or throw exception. 
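The comment above describes the core stubbing strategy of AutoLoadFailedTest: per-device ON_CALL defaults make core->compile_model either return a mock compiled model or throw, so the AUTO plugin's selection and fallback paths can be driven deterministically. Below is a minimal, self-contained sketch of that pattern; the Core/MockCore names are hypothetical stand-ins for illustration and do not reflect the real ov::ICore API.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <stdexcept>
#include <string>

// Hypothetical interface standing in for the core's compile entry point.
struct Core {
    virtual ~Core() = default;
    virtual std::string compile_model(const std::string& device) = 0;
};

struct MockCore : public Core {
    MOCK_METHOD(std::string, compile_model, (const std::string&), (override));
};

TEST(PerDeviceLoadStubbing, SuccessAndFailure) {
    using ::testing::Return;
    using ::testing::StrEq;
    using ::testing::Throw;

    ::testing::NiceMock<MockCore> core;
    // "CPU" loads successfully, "GPU" fails, mirroring the loadSuccess branch in the test body.
    ON_CALL(core, compile_model(StrEq("CPU"))).WillByDefault(Return("compiled_on_CPU"));
    ON_CALL(core, compile_model(StrEq("GPU"))).WillByDefault(Throw(std::runtime_error("compile error")));

    EXPECT_EQ(core.compile_model("CPU"), "compiled_on_CPU");
    EXPECT_THROW(core.compile_model("GPU"), std::runtime_error);
}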
if (loadSuccess) { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - (_))).WillByDefault(Return(mockExeNetwork)); + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + (_))) + .WillByDefault(Return(mockExeNetwork)); } else { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(StrEq(deviceName)), - (_))) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(StrEq(deviceName)), + (_))) .WillByDefault(Throw(ov::Exception{"compile error"})); } DeviceInformation devInfo; switch (configModel) { - case GENERAL: - devInfo = {deviceName, {}, 2, ""}; - break; - case LATENCY: - devInfo = {deviceName, {ov::hint::performance_mode("LATENCY"), ov::hint::allow_auto_batching(true), ov::auto_batch_timeout(1000)}, - 2, ""}; - break; - case THROUGHPUT: - devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""}; - break; - default: - LOG_ERROR("should not come here"); - break; + case GENERAL: + devInfo = {deviceName, {}, 2, ""}; + break; + case LATENCY: + devInfo = {deviceName, + {ov::hint::performance_mode("LATENCY"), + ov::hint::allow_auto_batching(true), + ov::auto_batch_timeout(1000)}, + 2, + ""}; + break; + case THROUGHPUT: + devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""}; + break; + default: + LOG_ERROR("should not come here"); + break; } metaDevices.push_back(std::move(devInfo)); @@ -156,9 +160,11 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { EXPECT_CALL(*plugin, parse_meta_devices(_, _)).Times(AtLeast(1)); EXPECT_CALL(*plugin, select_device(_, _, _)).Times(selectCount); - EXPECT_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))).Times(loadCount); + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + ::testing::Matcher(_))) + .Times(loadCount); // if loadSuccess will get the optimalNum requset of per device, in this test is 2; EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))) @@ -177,8 +183,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) { // DeviceParams {ov::test::utils::DEVICE_CPU, true}}, 2, 3, 2}, // // every element for ConfigParams -// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount, loadSuccessCount} -// { true, false, GENERAL, 3 device, 2, 3, 2} +// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount, +// loadSuccessCount} { true, false, GENERAL, 3 device, 2, 3, 2} // // there are three devices for loading // CPU load for accelerator success, but GPU will load faild and then select NPU and load again @@ -353,7 +359,7 @@ const std::vector testConfigs = { 3, 2}}; -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoLoadFailedTest, - ::testing::ValuesIn(testConfigs), - AutoLoadFailedTest::getTestCaseName); - +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + AutoLoadFailedTest, + ::testing::ValuesIn(testConfigs), + AutoLoadFailedTest::getTestCaseName); diff --git a/src/plugins/auto/tests/unit/select_device_test.cpp b/src/plugins/auto/tests/unit/select_device_test.cpp index 81a61ab3b027e5..baef090b32459c 100644 --- a/src/plugins/auto/tests/unit/select_device_test.cpp +++ b/src/plugins/auto/tests/unit/select_device_test.cpp @@ -5,30 +5,28 @@ #include "include/auto_unit_test.hpp" using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple< - std::string, // netPrecision - 
std::vector, // metaDevices for select - DeviceInformation, // expect DeviceInformation - bool, // throw exception - bool, // enabledevice_priority - bool // reverse total device - >; +using ConfigParams = std::tuple, // metaDevices for select + DeviceInformation, // expect DeviceInformation + bool, // throw exception + bool, // enabledevice_priority + bool // reverse total device + >; const DeviceInformation CPU_INFO = {ov::test::utils::DEVICE_CPU, {}, 2, "01", "CPU_01"}; const DeviceInformation IGPU_INFO = {"GPU.0", {}, 2, "01", "iGPU_01"}; const DeviceInformation DGPU_INFO = {"GPU.1", {}, 2, "01", "dGPU_01"}; -const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS" }; +const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS"}; const std::vector fp32DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector fp16DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector int8DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; -const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; +const std::vector binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO}; +const std::vector batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO}; std::map> devicesMap = {{"FP32", fp32DeviceVector}, - {"FP16", fp16DeviceVector}, - {"INT8", int8DeviceVector}, - {"BIN", binDeviceVector}, - {"BATCHED_BLOB", batchedblobDeviceVector} - }; + {"FP16", fp16DeviceVector}, + {"INT8", int8DeviceVector}, + {"BIN", binDeviceVector}, + {"BATCHED_BLOB", batchedblobDeviceVector}}; const std::vector totalDevices = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO}; const std::vector reverseTotalDevices = {CPU_INFO, OTHERS_INFO, IGPU_INFO, DGPU_INFO}; const std::vector netPrecisions = {"FP32", "FP16", "INT8", "BIN", "BATCHED_BLOB"}; @@ -47,7 +45,7 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam std::ostringstream result; result << "_netPrecision_" << netPrecision; for (auto& item : devices) { - result << "_device_" << item.unique_name; + result << "_device_" << item.unique_name; } result << "_expect_" << expect.unique_name; if (throwExcept) { @@ -72,9 +70,14 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } // combine select_num devices from devices and make them to ConfigParams // insert the ConfigParams into testConfigs - static void combine_device(const std::vector& devices, size_t start, - size_t* result, size_t result_index, const size_t select_num, std::string& netPrecision, - bool enabledevice_priority, bool reverse) { + static void combine_device(const std::vector& devices, + size_t start, + size_t* result, + size_t result_index, + const size_t select_num, + std::string& netPrecision, + bool enabledevice_priority, + bool reverse) { for (size_t i = start; i < devices.size() + 1 - result_index; i++) { result[result_index - 1] = i; if (result_index - 1 == 0) { @@ -100,8 +103,11 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam if (enabledevice_priority) { std::vector validDevices; for (auto& item : devicesInfo) { - auto device = std::find_if(metaDevices.begin(), metaDevices.end(), - [&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;}); + auto device = std::find_if(metaDevices.begin(), + metaDevices.end(), + [&item](const DeviceInformation& d) -> bool { + return d.unique_name == item.unique_name; + }); if (device != metaDevices.end()) { 
validDevices.push_back(*device); } @@ -118,8 +124,11 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } } else { for (auto& item : devicesInfo) { - auto device = std::find_if(metaDevices.begin(), metaDevices.end(), - [&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;}); + auto device = std::find_if(metaDevices.begin(), + metaDevices.end(), + [&item](const DeviceInformation& d) -> bool { + return d.unique_name == item.unique_name; + }); if (device != metaDevices.end()) { find = true; expect = item; @@ -133,11 +142,17 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } else { find = false; } - testConfigs.push_back(std::make_tuple(netPrecision, metaDevices, - expect, !find, enabledevice_priority, reverse)); + testConfigs.push_back( + std::make_tuple(netPrecision, metaDevices, expect, !find, enabledevice_priority, reverse)); } else { - combine_device(devices, i + 1, result, result_index - 1, - select_num, netPrecision, enabledevice_priority, reverse); + combine_device(devices, + i + 1, + result, + result_index - 1, + select_num, + netPrecision, + enabledevice_priority, + reverse); } } } @@ -178,7 +193,7 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam combine_device(reverseTotalDevices, 0, result, i, i, netPrecision, true, true); } } - delete []result; + delete[] result; return testConfigs; } @@ -189,14 +204,16 @@ class SelectDeviceTest : public tests::AutoTest, public ::testing::TestWithParam } void SetUp() override { - ON_CALL(*plugin, select_device).WillByDefault([this](const std::vector& metaDevices, - const std::string& netPrecision, unsigned int priority) { - return plugin->Plugin::select_device(metaDevices, netPrecision, priority); - }); - ON_CALL(*plugin, get_valid_device) - .WillByDefault([this](const std::vector& metaDevices, const std::string& netPrecision) { - return plugin->Plugin::get_valid_device(metaDevices, netPrecision); - }); + ON_CALL(*plugin, select_device) + .WillByDefault([this](const std::vector& metaDevices, + const std::string& netPrecision, + unsigned int priority) { + return plugin->Plugin::select_device(metaDevices, netPrecision, priority); + }); + ON_CALL(*plugin, get_valid_device) + .WillByDefault([this](const std::vector& metaDevices, const std::string& netPrecision) { + return plugin->Plugin::get_valid_device(metaDevices, netPrecision); + }); } }; @@ -220,13 +237,12 @@ TEST_P(SelectDeviceTest, SelectDevice) { if (throwExcept) { ASSERT_THROW(plugin->select_device(devices, netPrecision, 0), ov::Exception); } else { - auto result = plugin->select_device(devices, netPrecision, 0); + auto result = plugin->select_device(devices, netPrecision, 0); compare(result, expect); } } - - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, SelectDeviceTest, - ::testing::ValuesIn(SelectDeviceTest::CreateConfigs()), - SelectDeviceTest::getTestCaseName); \ No newline at end of file +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + SelectDeviceTest, + ::testing::ValuesIn(SelectDeviceTest::CreateConfigs()), + SelectDeviceTest::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto/tests/unit/set_log_level_test.cpp b/src/plugins/auto/tests/unit/set_log_level_test.cpp index bba4687ba331bd..cbc7e5235fe71f 100644 --- a/src/plugins/auto/tests/unit/set_log_level_test.cpp +++ b/src/plugins/auto/tests/unit/set_log_level_test.cpp @@ -4,14 +4,14 @@ #include "include/auto_unit_test.hpp" namespace { -void custom_unsetenv(const char 
*name) { +void custom_unsetenv(const char* name) { #ifdef _WIN32 _putenv((std::string(name) + "=").c_str()); #else ::unsetenv(name); #endif } -} // namespace +} // namespace using ConfigParams = std::tuple; using namespace ov::mock_auto_plugin; @@ -28,9 +28,10 @@ class AutoSetLogLevel : public tests::AutoTest, public ::testing::TestWithParam< } void SetUp() override { - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), - ::testing::Matcher(_))) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + ::testing::Matcher(_))) .WillByDefault(Return(mockExeNetwork)); metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; @@ -57,8 +58,12 @@ TEST_P(AutoSetLogLevel, setLogLevelFromConfig) { plugin->set_device_name("AUTO"); plugin->compile_model(model, config); int a = 0; - DEBUG_RUN([&a](){a++;}); - INFO_RUN([&a](){a++;}); + DEBUG_RUN([&a]() { + a++; + }); + INFO_RUN([&a]() { + a++; + }); if (log_level == "LOG_DEBUG" || log_level == "LOG_TRACE") { EXPECT_EQ(a, 2); } else if (log_level == "LOG_INFO") { diff --git a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp index afba53502e1b92..3618dcb27ee425 100644 --- a/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp +++ b/src/plugins/auto/tests/unit/startup_fallback_property_test.cpp @@ -5,8 +5,7 @@ using namespace ov::mock_auto_plugin; -using ConfigParams = std::tuple; +using ConfigParams = std::tuple; // define a matcher if all the elements of subMap are contained in the map. MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are contained in the map.") { @@ -32,17 +31,19 @@ class AutoStartupFallback : public tests::AutoTest, public ::testing::TestWithPa public: void SetUp() override { plugin->set_device_name("AUTO"); - ON_CALL(*core, compile_model(::testing::Matcher&>(_), - ::testing::Matcher(_), _)) + ON_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(_), + _)) .WillByDefault(Return(mockExeNetwork)); metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}}; ON_CALL(*plugin, parse_meta_devices(_, _)).WillByDefault(Return(metaDevices)); ON_CALL(*plugin, get_valid_device) - .WillByDefault([](const std::vector& metaDevices, const std::string& netPrecision) { - std::list devices(metaDevices.begin(), metaDevices.end()); - return devices; - }); - ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1])); + .WillByDefault([](const std::vector& metaDevices, const std::string& netPrecision) { + std::list devices(metaDevices.begin(), metaDevices.end()); + return devices; + }); + ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1])); } }; @@ -52,30 +53,24 @@ TEST_P(AutoStartupFallback, propertytest) { ov::AnyMap config; std::tie(startup_fallback, config) = this->GetParam(); - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_GPU), _)) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + ::testing::Matcher(ov::test::utils::DEVICE_GPU), + _)) .Times(1); if (startup_fallback) { std::map test_map = {{"PERFORMANCE_HINT", "LATENCY"}}; - EXPECT_CALL( - *core, - compile_model(::testing::Matcher&>(_), - ::testing::Matcher(ov::test::utils::DEVICE_CPU), - ::testing::Matcher(MapContains(test_map)))) + EXPECT_CALL(*core, + compile_model(::testing::Matcher&>(_), + 
::testing::Matcher(ov::test::utils::DEVICE_CPU), + ::testing::Matcher(MapContains(test_map)))) .Times(1); } ASSERT_NO_THROW(plugin->compile_model(model, config)); } -const std::vector testConfigs = {ConfigParams {true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}}, - ConfigParams {false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}} - }; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, - AutoStartupFallback, - ::testing::ValuesIn(testConfigs)); - +const std::vector testConfigs = {ConfigParams{true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}}, + ConfigParams{false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}}}; +INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, AutoStartupFallback, ::testing::ValuesIn(testConfigs)); diff --git a/src/plugins/auto_batch/src/sync_infer_request.cpp b/src/plugins/auto_batch/src/sync_infer_request.cpp index 4342ad6a55dfb9..c766c521cea27c 100644 --- a/src/plugins/auto_batch/src/sync_infer_request.cpp +++ b/src/plugins/auto_batch/src/sync_infer_request.cpp @@ -86,7 +86,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtrget_element_type(); - if (req->get_tensor(it)->data(type) != tensor->data(type)) { + bool is_remote = std::dynamic_pointer_cast(tensor._ptr) || + std::dynamic_pointer_cast(req->get_tensor(it)._ptr); + if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) { req->set_tensor(it, tensor); } } @@ -95,7 +97,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtrget_element_type(); - if (req->get_tensor(it)->data(type) != tensor->data(type)) { + bool is_remote = std::dynamic_pointer_cast(tensor._ptr) || + std::dynamic_pointer_cast(req->get_tensor(it)._ptr); + if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) { req->set_tensor(it, tensor); } } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp index 322a5b2914b547..52a4bee4fbc720 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_network_base.cpp @@ -11,9 +11,6 @@ namespace { const std::vector> configs = { {}, }; - const std::vector> multiConfigs = { - {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; const std::vector> heteroConfigs = { {{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}}; @@ -24,18 +21,6 @@ namespace { ::testing::ValuesIn(configs)), ExecutableNetworkBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecutableNetworkBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - ExecutableNetworkBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ExecutableNetworkBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -54,34 +39,10 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} }; - const std::vector> AutoConfigsSetPrc = { - 
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - }; - - const std::vector> MultiConfigsSetPrc = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configSetPrc)), ExecNetSetPrecision::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigsSetPrc)), - ExecNetSetPrecision::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigsSetPrc)), - ExecNetSetPrecision::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp index c821561cb798f3..1e5badc668ffb0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -19,23 +19,23 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); // // Executable Network GetConfig / SetConfig diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp index af232aae5ba9bd..1819bd0cc02198 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -12,25 +12,9 @@ const std::vector> configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> multiConfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestCallbackTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp index 59aba8a26f5e32..6d3dd1bfbd2e34 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp @@ -10,10 +10,6 @@ namespace { {} }; - const std::vector> multiConfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - const std::vector> InConfigs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, @@ -23,41 +19,10 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}} }; - const std::vector> MultiInConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest, ::testing::Combine( ::testing::Values(1u), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestConfigTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - 
::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestConfigTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests_, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiInConfigs)), - InferRequestConfigTest::getTestCaseName); - - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp index 1c5fc6437eeb68..f93876de32ce21 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -15,30 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - - const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestIOBBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestIOBBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestIOBBlobTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestIOBBlobTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp index e409ad7a866935..00bd57165b7ba3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/memory_states.cpp @@ -17,30 +17,8 @@ std::vector memoryStateTestCases = { ov::test::utils::DEVICE_HETERO, {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; -std::vector memoryStateAutoTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_AUTO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; - -std::vector memoryStateMultiTestCases = { - memoryStateParams(InferRequestVariableStateTest::getNetwork(), - {"c_1-3", "r_1-3"}, - ov::test::utils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})}; - INSTANTIATE_TEST_SUITE_P(smoke_VariableStateBasic, InferRequestVariableStateTest, ::testing::ValuesIn(memoryStateTestCases), InferRequestVariableStateTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestVariableStateTest, - ::testing::ValuesIn(memoryStateAutoTestCases), - InferRequestVariableStateTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestVariableStateTest, - 
::testing::ValuesIn(memoryStateMultiTestCases), - InferRequestVariableStateTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp index ada6236dd61dc4..3d51e3b53ebcb7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/multitheading.cpp @@ -15,26 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - InferRequestMultithreadingTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp index 0a74955fc4ea18..f1290d3f6e2564 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -35,30 +35,9 @@ const std::vector> configs = { {} }; -const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - -const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestPerfCountersTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp index dae5799d5a99d2..056ac921676719 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -15,8 +15,6 @@ const std::vector BlobTypes = { }; const std::map cpuConfig{}; //nothing special -const std::map autoConfig{}; -const std::map multiConfig{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}; const std::map heteroConfig{{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}; INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, @@ -25,19 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, ::testing::Values(cpuConfig)), InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(multiConfig)), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU), - ::testing::Values(autoConfig)), - InferRequestSetBlobByType::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType, ::testing::Combine(::testing::ValuesIn(BlobTypes), ::testing::Values(ov::test::utils::DEVICE_HETERO), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp index 22c9edb1a14aa3..451bd8eb3b98ed 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -15,30 +15,9 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; - const std::vector> Multiconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - - const std::vector> Autoconfigs = { - {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), InferRequestWaitTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - InferRequestWaitTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - InferRequestWaitTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp index 52ea12486cbf58..0f71d3e80c30ad 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/core_integration.cpp @@ -17,7 +17,7 @@ namespace { INSTANTIATE_TEST_SUITE_P( 
smoke_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); const std::vector>> GetMetricTest_ExecutionDevice_CPU = { {"CPU", std::make_pair(ov::AnyMap{}, "CPU")}}; @@ -32,7 +32,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest, - ::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU")); + ::testing::Values("CPU", "HETERO:CPU")); INSTANTIATE_TEST_SUITE_P( smoke_OVClassCompiledModelGetConfigTest, OVClassCompiledModelGetConfigTest, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp index 8673f298bb639f..255a87b07229c9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -11,9 +11,6 @@ namespace { const std::vector configs = { {}, }; - const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} - }; const std::vector heteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; @@ -24,18 +21,6 @@ namespace { ::testing::ValuesIn(configs)), OVCompiledModelBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -48,18 +33,6 @@ namespace { ::testing::ValuesIn(configs)), OVCompiledModelBaseTestOptional::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTestOptional, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledModelBaseTestOptional::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTestOptional, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_HETERO), @@ -77,14 +50,4 @@ namespace { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} }; - - const std::vector AutoConfigsSetPrc = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - }; - - const std::vector MultiConfigsSetPrc = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} - }; } // namespace diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp index cb78e3c0a11fa8..16f4c82c74be24 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/ov_exec_net_import_export.cpp @@ -23,8 +23,6 @@ const std::vector netPrecisions = { const std::vector configs = { {}, }; -const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; const std::vector heteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; @@ -37,14 +35,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(configs)), OVCompiledGraphImportExportTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - OVCompiledGraphImportExportTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVCompiledGraphImportExportTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledGraphImportExportTest, ::testing::Combine(::testing::ValuesIn(netPrecisions), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp index 35a8001c0cc133..913315542db85b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/properties.cpp @@ -21,8 +21,7 @@ const std::vector auto_batch_inproperties = { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVClassCompiledModelPropertiesIncorrectTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU, - ov::test::utils::DEVICE_HETERO, - ov::test::utils::DEVICE_MULTI, "AUTO:CPU"), + ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(inproperties)), OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); @@ -84,13 +83,6 @@ const std::vector hetero_properties = { InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, }; -const std::vector multi_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::num_streams(ov::streams::AUTO)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, -}; - const std::vector auto_batch_properties = { {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}}, {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}, @@ -111,12 +103,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ::testing::ValuesIn(hetero_properties)), OVClassCompiledModelPropertiesTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - OVClassCompiledModelPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_properties)), - OVClassCompiledModelPropertiesTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVClassCompiledModelPropertiesTests, 
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -125,22 +111,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, OVCompiledModelIncorrectDevice, ::testing::Values("CPU")); - -const std::vector auto_multi_device_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::device::properties("CPU", ov::num_streams(4))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow, - OVClassCompiledModelPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(auto_multi_device_properties)), - OVClassCompiledModelPropertiesTests::getTestCaseName); - const std::vector configsWithSecondaryProperties = { {ov::device::properties("CPU", ov::num_streams(4))}, {ov::device::properties("CPU", @@ -151,48 +121,6 @@ const std::vector configsWithSecondaryProperties = { ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; -const std::vector multiConfigsWithSecondaryProperties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; - -const std::vector autoConfigsWithSecondaryProperties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties("AUTO", - ov::enable_profiling(false), - ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), - ov::device::properties("CPU", - ov::num_streams(4), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), - ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; - const std::vector heteroConfigsWithSecondaryProperties = { 
{ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::device::properties("HETERO", @@ -227,52 +155,11 @@ const std::vector heteroConfigsWithSecondaryProperties = { // IE Class Load network INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU", "HETERO:CPU"), + ::testing::Combine(::testing::Values("CPU", "HETERO:CPU"), ::testing::ValuesIn(configsWithSecondaryProperties))); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("MULTI"), - ::testing::ValuesIn(multiConfigsWithSecondaryProperties))); - -INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("AUTO"), - ::testing::ValuesIn(autoConfigsWithSecondaryProperties))); - INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(heteroConfigsWithSecondaryProperties))); - -const std::vector> automultiExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU)}}, "CPU")}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(automultiExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - -const std::vector multiDevicePriorityConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(multiDevicePriorityConfigs)), - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName); - -const std::vector multiModelPriorityConfigs = { - {ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::model_priority(ov::hint::Priority::LOW)}, - {ov::hint::model_priority(ov::hint::Priority::DEFAULT)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, - ::testing::Combine(::testing::Values("AUTO:CPU"), - ::testing::ValuesIn(multiModelPriorityConfigs))); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index d39adcafc87da3..2051aab35e71ea 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -15,25 +15,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector multiConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( 
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - OVInferRequestCallbackTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp index a0d07511e31f0b..1423d023cbbac3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp @@ -20,41 +20,10 @@ std::vector configs = { {{ov::test::utils::DEVICE_CPU, {}}, {ov::test::utils::DEVICE_CPU, {}}} }; -std::vector AutoConfigs = { - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)} - }, - {ov::test::utils::DEVICE_CPU, {}} - }, - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)} - }, - {ov::test::utils::DEVICE_CPU, {}} - }, - { - { - ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)} - }, - {ov::test::utils::DEVICE_CPU, {}} - } -}; - INSTANTIATE_TEST_SUITE_P(BehaviorTests, OVInferConsistencyTest, ::testing::Combine( ::testing::Values(10),// inferRequest num ::testing::Values(10),// infer counts ::testing::ValuesIn(configs)), OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(10),// inferRequest num - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoConfigs)), - OVInferConsistencyTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 3b4049eaf05a3b..fa66f4a2c7801d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -18,10 +18,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - std::shared_ptr getFunction1() { const std::vector inputShape = {1, 4, 20, 20}; const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; @@ -93,15 +89,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVInferRequestDynamicTests::getTestCaseName); - 
-INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferRequestDynamicTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index a134c54772f118..f8030e4332296f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -17,10 +17,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChaining, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -33,12 +29,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChaining, ::testing::ValuesIn(HeteroConfigs)), OVInferenceChaining::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferenceChaining::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChainingStatic, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -50,10 +40,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChainingStatic, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVInferenceChainingStatic::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChainingStatic, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVInferenceChainingStatic::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 93b0bb59dfa0ea..07cd925b940595 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -15,14 +15,6 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - const std::vector emptyConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, @@ -31,18 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::ValuesIn(configs)), OVInferRequestIOTensorTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - 
::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestIOTensorTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestIOTensorTest::getTestCaseName); - std::vector prcs = { ov::element::boolean, ov::element::bf16, @@ -69,38 +49,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision ::testing::ValuesIn(configs)), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCheckTensorPrecision, ::testing::Combine( ::testing::ValuesIn(prcs), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(emptyConfigs)), OVInferRequestCheckTensorPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCheckTensorPrecision, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestCheckTensorPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCheckTensorPrecision, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestCheckTensorPrecision::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp index 6bae74745729c0..2c8678165426b3 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/iteration_chaining.cpp @@ -17,10 +17,6 @@ const std::vector HeteroConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_CPU)} }; -const std::vector AutoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVIterationChaining, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), @@ -32,11 +28,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVIterationChaining, ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(HeteroConfigs)), OVIterationChaining::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVIterationChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - OVIterationChaining::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 4aa193dd0ff330..f341cb560def20 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -16,26 +16,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestMultithreadingTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index f051edb92958f4..959bd3fe6cc9fe 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -11,29 +11,9 @@ const std::vector configs = { {} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestPerfCountersTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 53ead79c66e1dc..a1102d17577e02 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -16,30 +16,9 @@ const std::vector configs = { {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, 
"1"}} }; -const std::vector Multiconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - -const std::vector Autoconfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)} -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(configs)), OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs)), - OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs)), - OVInferRequestWaitTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index b41b969c24bc66..efd48c350c7e9a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -133,53 +133,6 @@ namespace { ::testing::ValuesIn(autoConfigs)), CompileModelCacheTestBase::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(CompileModelCacheTestBase::getNumericAnyTypeFunctions()), - ::testing::ValuesIn(precisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU_Float, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(CompileModelCacheTestBase::getFloatingPointOnlyFunctions()), - ::testing::ValuesIn(floatPrecisionsCPU), - ::testing::ValuesIn(batchSizesCPU), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU_Internal, CompileModelCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(internal_functions_cpu()), - ::testing::ValuesIn(precisionsCPUInternal), - ::testing::ValuesIn(batchSizesCPUInternal), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - CompileModelCacheTestBase::getTestCaseName); - - const std::vector LoadFromFileConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - }; - const std::vector TestTargets = - {ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, CompileModelLoadFromFileTestBase, - ::testing::Combine( - ::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromFileTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_CPU, - CompileModelLoadFromMemoryTestBase, - ::testing::Combine(::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromMemoryTestBase::getTestCaseName); - const std::vector CpuConfigs = { {ov::num_streams(2)}, }; diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index a9201b7674bdc3..fcc23fe1a8d906 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -28,25 +28,4 @@ INSTANTIATE_TEST_SUITE_P( // IE Class Query model INSTANTIATE_TEST_SUITE_P(smoke_OVClassQueryModelTest, OVClassQueryModelTest, ::testing::Values("CPU")); - -const std::vector configsWithEmpty = {{}}; -const std::vector configsWithMetaPlugin = {{ov::device::priorities("AUTO")}, - {ov::device::priorities("MULTI")}, - {ov::device::priorities("AUTO", "MULTI")}, - {ov::device::priorities("AUTO", "CPU")}, - {ov::device::priorities("MULTI", "CPU")}}; - -INSTANTIATE_TEST_SUITE_P( - smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyWithEmptyConfigTest, - OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, - ::testing::Combine(::testing::Values("MULTI:AUTO", "AUTO:MULTI", "MULTI:CPU,AUTO", "AUTO:CPU,MULTI"), - ::testing::ValuesIn(configsWithEmpty)), - ::testing::PrintToStringParamName()); - -INSTANTIATE_TEST_SUITE_P(smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyTest, - OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(configsWithMetaPlugin)), - ::testing::PrintToStringParamName()); - } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index a46121e053bee5..cc6f8cf3217187 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp @@ -11,9 +11,7 @@ namespace { OVHoldersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTest, - ::testing::Values("AUTO:CPU", - "MULTI:CPU", - //ov::test::utils::DEVICE_BATCH, + ::testing::Values(//ov::test::utils::DEVICE_BATCH, "HETERO:CPU"), OVHoldersTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 4e7dfdaeac914d..b736a5ce7b6be2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -26,28 +26,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(cpu_properties)), OVPropertiesTests::getTestCaseName); -const std::vector multi_Auto_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), 
ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::device_bind_buffer("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::device_bind_buffer("NO")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::enable_startup_fallback("YES")}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::intel_auto::enable_startup_fallback("NO")}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests, - OVPropertiesTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_Auto_properties)), - OVPropertiesTests::getTestCaseName); - const std::vector cpu_setcore_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), ov::hint::num_requests(2), @@ -64,97 +42,24 @@ INSTANTIATE_TEST_SUITE_P(smoke_cpuCompileModelBehaviorTests, ::testing::ValuesIn(cpu_compileModel_properties)), OVSetPropComplieModleGetPropTests::getTestCaseName); -const std::vector multi_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::HIGH)}}; -const std::vector multi_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; - -INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multi_setcore_properties), - ::testing::ValuesIn(multi_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - -const std::vector auto_setcore_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::HIGH)}, -}; -const std::vector auto_compileModel_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::model_priority(ov::hint::Priority::MEDIUM)}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests, - OVSetPropComplieModleGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_setcore_properties), - ::testing::ValuesIn(auto_compileModel_properties)), - OVSetPropComplieModleGetPropTests::getTestCaseName); - -const std::vector 
default_properties = {{ov::enable_profiling(false)}, - {ov::log::level("LOG_NONE")}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)}, - {ov::intel_auto::device_bind_buffer(false)}, - {ov::intel_auto::enable_startup_fallback(true)}, - {ov::device::priorities("")}}; -INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests, - OVPropertiesDefaultTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(default_properties)), - OVPropertiesDefaultTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVPropertiesDefaultSupportedTests, - ::testing::Values(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_AUTO)); - -const std::vector auto_multi_incorrect_device_properties = { - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::num_streams(4), - ov::device::properties("CPU", ov::num_streams(4))}, - {ov::device::priorities(ov::test::utils::DEVICE_CPU), - ov::num_streams(4), - ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow, - OVSetUnsupportPropCompileModelWithoutConfigTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), - ::testing::ValuesIn(auto_multi_incorrect_device_properties)), - OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName); + ::testing::Values(ov::test::utils::DEVICE_CPU)); // // IE Class GetMetric // -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiHeteroOVGetMetricPropsTest, +INSTANTIATE_TEST_SUITE_P(smoke_HeteroOVGetMetricPropsTest, OVGetMetricPropsTest, - ::testing::Values("MULTI", "HETERO", "AUTO")); + ::testing::Values("HETERO")); INSTANTIATE_TEST_SUITE_P(smoke_OVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("CPU")); INSTANTIATE_TEST_SUITE_P( - smoke_AutoMultiHeteroOVCheckGetSupportedROMetricsPropsTests, + smoke_HeteroOVCheckGetSupportedROMetricsPropsTests, OVCheckGetSupportedROMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI", "HETERO", "AUTO"), + ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(OVCheckGetSupportedROMetricsPropsTests::configureProperties( {ov::device::full_name.name()}))), OVCheckGetSupportedROMetricsPropsTests::getTestCaseName); @@ -171,29 +76,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVGetAvailableDevicesPropsTest, OVGetAvailableDevicesPropsTest, ::testing::Values("CPU")); -INSTANTIATE_TEST_SUITE_P( - OVCheckSetSupportedRWMandatoryMetricsPropsTests, - OVCheckSetSupportedRWMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI:CPU", "AUTO:CPU"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues( - {ov::hint::model_priority.name(), ov::log::level.name()}))), - OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P( - OVCheckSetSupportedRWOptionalMetricsPropsTests, - OVCheckSetSupportedRWMetricsPropsTests, - ::testing::Combine(::testing::Values("MULTI:CPU", "AUTO:CPU"), - ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWOptionalPropertiesValues( - {ov::hint::enable_hyper_threading.name(), - ov::hint::enable_cpu_pinning.name(), - ov::hint::scheduling_core_type.name()}))), - OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); - const std::vector multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_CPU)}}; 
INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest, OVClassSetDevicePriorityConfigPropsTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"), + ::testing::Combine(::testing::Values("HETERO"), ::testing::ValuesIn(multiConfigs))); const std::vector configsDeviceProperties = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index 3e270874ce9542..5cdbb8fbd7285b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -67,67 +67,12 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}}, }; - const std::vector> MultiConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}} - }; - - const std::vector> AutoConfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, 
InferenceEngine::PluginConfigParams::LOG_WARNING}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(Configs)), CorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs)), - CorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs)), - CorrectConfigTests::getTestCaseName); - const std::vector> inconfigs = { {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, @@ -138,89 +83,18 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, }; - const std::vector> multiinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}}, - }; - - 
const std::vector> autoinconfigs = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}} - }; - - const std::vector> multiconf = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(inconfigs)), IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, IncorrectConfigAPITests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(inconfigs)), IncorrectConfigAPITests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, 
IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs)), - IncorrectConfigAPITests::getTestCaseName); - const std::vector> ConfigsCheck = { {}, {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}}, @@ -256,34 +130,10 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, }}; - const std::vector> auto_multi_prop_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - - const std::vector> auto_multi_loadNetWork_config = { - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, SetPropLoadNetWorkGetPropTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(cpu_prop_config), ::testing::ValuesIn(cpu_loadNetWork_config)), SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config), - ::testing::ValuesIn(auto_multi_loadNetWork_config)), - SetPropLoadNetWorkGetPropTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp index 7d8f93b2144e88..6934ffaa19f78c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp @@ -30,11 +30,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, @@ -42,11 +42,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, - 
::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, - ::testing::Values("CPU", "MULTI", "AUTO")); + ::testing::Values("CPU")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, @@ -58,11 +58,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, - ::testing::Values("CPU", "MULTI", "HETERO", "AUTO")); + ::testing::Values("CPU", "HETERO")); INSTANTIATE_TEST_SUITE_P( smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp index 0495f2b4cbbb6a..5f1ada306d367a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp @@ -5,9 +5,6 @@ #include #ifdef __GLIBC__ #include -#if __GLIBC_MINOR__ >= 34 - #define ENABLETESTMULTI -#endif #endif namespace { @@ -15,10 +12,6 @@ namespace { const Params params[] = { std::tuple{ ov::test::utils::DEVICE_CPU, {{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}}, std::tuple{ ov::test::utils::DEVICE_HETERO, {{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }}}, -#ifdef ENABLETESTMULTI - std::tuple{ ov::test::utils::DEVICE_MULTI, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU }}}, - std::tuple{ ov::test::utils::DEVICE_AUTO, {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU }}}, -#endif }; const Params paramsStreams[] = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp index f3f3588d375617..49664de10b4c2d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -23,10 +23,6 @@ namespace { {{ "TARGET_FALLBACK" , ov::test::utils::DEVICE_CPU}} }; - const std::vector> multiConfigs = { - {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}} - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisions), @@ -41,21 +37,6 @@ namespace { ::testing::ValuesIn(heteroConfigs)), InferRequestPreprocessTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessTest::getTestCaseName); 
- - const std::vector ioPrecisions = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8 @@ -125,63 +106,6 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(heteroConfigs)), InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessConversionTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(netLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessConversionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(netLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(true), // only SetBlob - ::testing::Values(true), // only SetBlob - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessConversionTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(ioPrecisions), - ::testing::ValuesIn(netLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::ValuesIn(ioLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessConversionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Bool(), - ::testing::Bool(), - ::testing::ValuesIn(netLayouts), - ::testing::Bool(), - ::testing::Bool(), - ::testing::Values(true), // only SetBlob - ::testing::Values(true), // only SetBlob - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs)), - InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); - } // namespace #endif // ENABLE_GAPI_PREPROCESSING diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp index d0bcd50ef58e04..bcc0130ca8e8ab 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -10,14 +10,6 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_CPU), VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_MULTI), - VersionTest::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_AUTO), - VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_HETERO), VersionTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp deleted file mode 100644 index e3bf419f1208b2..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/multi/cpu_remote_blob_tests.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "multi/multi_remote_blob_tests.hpp" -#include "common_test_utils/test_constants.hpp" - -const std::vector device_names_and_support_for_remote_blobs { - {{CPU}, false, {}}, // CPU via MULTI -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobCPU, MultiDevice_SupportTest, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index b9caace0239ab2..3daad41fd0fe5a 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -68,9 +68,6 @@ std::vector disabledTestPatterns() { R"(.*NonZeroLayerTest.*)", // Not expected behavior R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)", - R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)", - R"(.*(Auto|Multi).*Behavior.*CorrectConfigAPITests.*CanSetExclusiveAsyncRequests.*)", - R"(.*(Auto|Multi).*Behavior.*IncorrectConfigTests.*CanNotLoadNetworkWithIncorrectConfig.*)", R"(.*OVCompiledModelBaseTest.*(CanGetInputsInfoAndCheck|canSetConfigToCompiledModel).*)", R"(.*Behavior.*CorrectConfigCheck.*(canSetConfigAndCheckGetConfig|canSetConfigTwiceAndCheckGetConfig).*CPU_BIND_THREAD=YES.*)", // Issue: 72021 Unreasonable abs_threshold for comparing bf16 results @@ -88,24 +85,18 @@ std::vector disabledTestPatterns() { R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*ExecGraphInfo.*)", R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*ExecGraphInfo.*)", R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*CanCreateTwoExeNetworksAndCheckFunction.*)", - // TODO: 104942 - R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)", - R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)", R"(.*Hetero.*Behavior.*OVCompiledModelBaseTest.*canCreateTwoCompiledModelAndCheckTheir.*)", // CPU does not support dynamic rank // Issue: 66778 R"(.*smoke_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", R"(.*smoke_Hetero_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", - R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)", R"(.*smoke_BehaviorTests.*DynamicOutputToDynamicInput.*)", R"(.*smoke_BehaviorTests.*DynamicInputToDynamicOutput.*)", R"(.*smoke_Hetero_BehaviorTests.*DynamicOutputToDynamicInput.*)", R"(.*smoke_Hetero_BehaviorTests.*DynamicInputToDynamicOutput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)", - R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)", // unsupported metrics R"(.*OVGetMetricPropsTest.*OVGetMetricPropsTest.*(MAX_BATCH_SIZE).*)", - R"(.*smoke_AutoMultiHeteroOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + 
R"(.*smoke_HeteroOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // supports only '' as device id R"(.*OVClassQueryModelTest.*QueryModelWithDeviceID.*)", @@ -154,8 +145,6 @@ std::vector disabledTestPatterns() { R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*KSOFunction.*)", R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*NonMaxSuppression.*)", R"(.*CompileModelCacheTestBase.*CompareWithRefImpl.*Nms.*)", - // Issue: 76980 - R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)", // Issue: 105838 R"(smoke_NmsLayerTest.*)", // Issue: 95590 @@ -167,8 +156,6 @@ std::vector disabledTestPatterns() { // The kernel does not have such garbage. The diff 0.000000745 is taken into account in calculations and affects further type conversion. // Reorder->GridSample->Reorder also does not work here. Potential fix is to use nearest conversion instead of truncation. R"(.*GridSampleLayerTestCPU.*(BILINEAR|BICUBIC).*(i32|i8).*)", - // AUTO does not support import / export - R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)", // AdaptiveAvgPool is converted into Reduce op for suitable parameters. CPU Reduce impl doesn't support non planar layout for 3D case R"(.*StaticAdaPoolAvg3DLayoutTest.*OS=\(1\).*_inFmts=(nwc|nCw16c|nCw8c).*)", // Issue: 111404 @@ -186,8 +173,8 @@ std::vector disabledTestPatterns() { // Issue: 106939 R"(.*ScatterNDUpdateLayerCPUTest.*-1.-1.-1.-2.-2.-2.*)", // New plugin API doesn't support changes of pre-processing - R"(.*(Auto|Multi|Hetero).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", - R"(.*(Auto|Multi|Hetero).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", + R"(.*(Hetero).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)", + R"(.*(Hetero).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)", // TODO: for 22.2 (Issue 68949) R"(.*smoke_AutoBatching_CPU/AutoBatching_Test_DetectionOutput.*)", // Issue: 117837 diff --git a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt index 480717eaacb912..60afb801970b01 100644 --- a/src/plugins/intel_gpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_gpu/tests/functional/CMakeLists.txt @@ -57,14 +57,3 @@ if(WIN32) target_compile_definitions(${TARGET_NAME} PRIVATE ENABLE_DX11) target_link_libraries(${TARGET_NAME} PRIVATE d3d11 dxgi) endif() - -if (ENABLE_INTEL_CPU) - set_source_files_properties( - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_plugin/life_time.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/multi/gpu_remote_blob_tests.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/infer_request/memory_states.cpp" - "${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp" - PROPERTIES COMPILE_DEFINITIONS ENABLE_INTEL_CPU=1) -endif() diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp index 8e048d5d3b9283..ce1b6c2c7c02b3 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/exec_net_base.cpp @@ -44,20 +44,6 @@ auto configsSetPrc = []() { InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}}; }; -auto multiConfig = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, - InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}}; -}; - -auto autoConfig = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - }; -}; - auto autoBatchConfig = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -73,20 +59,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision, ::testing::ValuesIn(configsSetPrc())), ExecNetSetPrecision::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfig())), - ExecNetSetPrecision::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetSetPrecision, - ::testing::Combine( - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfig())), - ExecNetSetPrecision::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, ExecNetSetPrecision, ::testing::Combine( ::testing::ValuesIn(netPrecisions), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp index 68920e41fddf64..c27224d0107011 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/executable_network/get_metric.cpp @@ -22,27 +22,27 @@ namespace { INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); INSTANTIATE_TEST_SUITE_P( nightly_IEClassExecutableNetworkGetMetricTest, 
IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU") + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU") ); // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp index f30aabd31f08d8..6cec708b6756ea 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/callback.cpp @@ -12,18 +12,6 @@ auto configs = []() { }; }; -auto multiConfigs = []() { - return std::vector>{ - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; -}; - auto autoBatchConfigs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -38,18 +26,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, ::testing::ValuesIn(configs())), InferRequestCallbackTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - InferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - InferRequestCallbackTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp index 88669d1bc716a1..62b0bd6fa64f9e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/config.cpp @@ -10,22 +10,10 @@ auto configs = []() { return std::vector>{{}}; }; -auto multiConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest, ::testing::Combine( ::testing::Values(1u), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(configs())), InferRequestConfigTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestConfigTest, - ::testing::Combine( - ::testing::Values(1u), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - InferRequestConfigTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp index 55345b782a37a4..ed0ae6944f9705 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/io_blob.cpp @@ -9,34 +9,9 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU}}}; -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(std::map({}))), InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestIOBBlobTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestIOBBlobTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoconfigs())), - InferRequestIOBBlobTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp index 6c409783d786a4..484fee15c99d44 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/multithreading.cpp @@ -9,18 +9,6 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU}}}; -}; - auto auto_batch_configs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -35,19 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, ::testing::Values(std::map({}))), InferRequestMultithreadingTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoconfigs())), - InferRequestMultithreadingTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp index a76185499e0e13..3165b94647180e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/perf_counters.cpp @@ -10,20 +10,6 @@ auto configs = []() { return std::vector>{{}}; }; -auto Multiconfigs = - []() { - return std::vector>{ - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}}; - }; - -auto AutoConfigs = - []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; - }; - auto AutoBatchConfigs = []() { return std::vector>{ @@ -39,18 +25,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::ValuesIn(configs())), InferRequestPerfCountersTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - InferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestPerfCountersTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - InferRequestPerfCountersTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestPerfCountersTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp index d6a1026f9d15b0..7ffee02692fbb4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/set_blob_by_type.cpp @@ -17,9 +17,7 @@ const std::vector BlobTypes = { auto gpuConfig = []() { return std::map{}; }; // nothing special -auto multiConfig = []() { - return std::map{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_GPU}}; -}; + auto heteroConfig = []() { return std::map{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}}; }; @@ -30,18 +28,6 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType, ::testing::Values(gpuConfig())), InferRequestSetBlobByType::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::Values(multiConfig())), - InferRequestSetBlobByType::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, InferRequestSetBlobByType, - ::testing::Combine(::testing::ValuesIn(BlobTypes), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::Values(multiConfig())), - InferRequestSetBlobByType::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType, ::testing::Combine(::testing::ValuesIn(BlobTypes), ::testing::Values(ov::test::utils::DEVICE_HETERO), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp index 7a4e9988f73b67..703fbc677524e2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/infer_request/wait.cpp @@ -9,18 +9,6 @@ using namespace BehaviorTestsDefinitions; namespace { -auto configs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}}; -}; - -auto autoConfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}}; -}; - auto autoBatchConfigs = []() { return std::vector>{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -35,18 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::Values(std::map({}))), InferRequestWaitTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(configs())), - InferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - InferRequestWaitTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - InferRequestWaitTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, InferRequestWaitTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index a067a0ea08faf6..951108438ed190 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -20,14 +20,6 @@ auto autoBatchConfigs = []() { {CONFIG_KEY(AUTO_BATCH_TIMEOUT), "0 "}}}; }; -const std::vector autoConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}, -#endif -}; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelBaseTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_GPU), @@ -39,10 +31,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatchBehaviorTests, OVCompiledModelBaseTest, ::testing::Values(ov::test::utils::DEVICE_BATCH), ::testing::ValuesIn(autoBatchConfigs())), OVCompiledModelBaseTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - OVAutoExecutableNetworkTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs)), - OVCompiledModelBaseTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index a51dc4156977f9..6a7176302f684f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -18,7 +18,7 @@ namespace { INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU")); + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); const std::vector>> GetMetricTest_ExecutionDevice_GPU = { @@ -29,37 +29,13 @@ const std::vector>> G INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest_EXEC_DEVICES, ::testing::ValuesIn(GetMetricTest_ExecutionDevice_GPU)); - -auto multiDevicePriorityConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_CPU)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, - 
{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}; -}; - -INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), - ::testing::ValuesIn(multiDevicePriorityConfigs()))); - -auto multiModelPriorityConfigs = []() { - return std::vector{{ov::hint::model_priority(ov::hint::Priority::HIGH)}, - {ov::hint::model_priority(ov::hint::Priority::MEDIUM)}, - {ov::hint::model_priority(ov::hint::Priority::LOW)}}; -}; - -INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetPropertyTest, - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY, - ::testing::Combine(::testing::Values("AUTO"), - ::testing::ValuesIn(multiModelPriorityConfigs())), - OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY::getTestCaseName); - // // Executable Network GetConfig / SetConfig // INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest, - ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU", "AUTO:GPU,CPU", "BATCH:GPU")); + ::testing::Values("GPU", "HETERO:GPU", "BATCH:GPU")); INSTANTIATE_TEST_SUITE_P(nightly_OVClassCompiledModelGetConfigTest, OVClassCompiledModelGetConfigTest, @@ -82,9 +58,7 @@ const std::vector incorrect_device_priorities_properties = {{ov::dev INSTANTIATE_TEST_SUITE_P(smoke_BehaviorIncorrectPropertiesTests, OVClassCompiledModelPropertiesIncorrectTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_HETERO), + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_HETERO), ::testing::ValuesIn(incorrect_device_priorities_properties)), OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName); @@ -119,115 +93,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompileModelWithCorrectSecondaryProperties ::testing::ValuesIn(gpuCorrectConfigsWithSecondaryProperties())), ::testing::PrintToStringParamName()); -INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectSecondaryPropertiesTest, +INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectSecondaryPropertiesTest, OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("AUTO:GPU", "MULTI:GPU", "HETERO:GPU"), + ::testing::Combine(::testing::Values("HETERO:GPU"), ::testing::ValuesIn(gpuCorrectConfigsWithSecondaryProperties()))); -auto autoCorrectConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(true)}}; -}; - -auto autoCorrectConfigsWithSecondaryProperties = []() { - return std::vector{ - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_AUTO, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_GPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties(ov::test::utils::DEVICE_GPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - 
ov::hint::allow_auto_batching(false)), - ov::device::properties(ov::test::utils::DEVICE_CPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::allow_auto_batching(false))}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), - ov::device::properties("GPU.0", - ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), - ov::hint::allow_auto_batching(false)), - ov::device::properties(ov::test::utils::DEVICE_CPU, - ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), - ov::hint::allow_auto_batching(false))}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_OVClassCompileModelWithCorrectPropertiesAutoBatchingTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoCorrectConfigs()))); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_OVClassCompileModelWithCorrectSecondaryPropertiesTest, - OVClassCompileModelWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, - ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoCorrectConfigsWithSecondaryProperties())), - ::testing::PrintToStringParamName()); - const std::vector batchCorrectConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_Auto_Batch_OVClassCompileModelWithCorrectPropertiesAutoBatchingTest, OVClassCompileModelWithCorrectPropertiesTest, ::testing::Combine(::testing::Values("BATCH:GPU"), ::testing::ValuesIn(batchCorrectConfigs))); - -const std::vector> autoExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities("GPU.0")}}, "GPU.0"), -#ifdef ENABLE_INTEL_CPU - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "undefined"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::allow_auto_batching(true)}}, - "GPU,CPU"), -#endif -}; - -const std::vector> multiExeDeviceConfigs = { - std::make_pair(ov::AnyMap{{ov::device::priorities("GPU.0")}}, "GPU.0"), -#ifdef ENABLE_INTEL_CPU - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "CPU,GPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - "GPU,CPU"), - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - 
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::hint::allow_auto_batching(true)}}, - "GPU,CPU"), -#endif -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests, - OVCompileModelGetExecutionDeviceTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiExeDeviceConfigs)), - OVCompileModelGetExecutionDeviceTests::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 6ce84fb09d28ea..fa63fdbea6ec9c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -17,10 +17,6 @@ auto configs = []() { }; }; -auto multiConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto autoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -35,18 +31,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, ::testing::ValuesIn(configs())), OVInferRequestCallbackTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiConfigs())), - OVInferRequestCallbackTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(multiConfigs())), - OVInferRequestCallbackTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp index 207d57b5b355f1..7d6a57ae2cc003 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_consistency.cpp @@ -20,71 +20,10 @@ auto configs = []() { return std::vector{{{ov::test::utils::DEVICE_GPU, {}}, {ov::test::utils::DEVICE_GPU, {}}}}; }; -auto AutoConfigs = []() { - return std::vector{{{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU, - 
{ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU + "," + - ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}}, - {ov::test::utils::DEVICE_CPU, {}}, - {ov::test::utils::DEVICE_GPU, {}}}}; -}; - -auto AutoBindConfigs = []() { - return std::vector{{{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_GPU + "," + - ov::test::utils::DEVICE_CPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)}}, - {ov::test::utils::DEVICE_GPU, {}}, - {ov::test::utils::DEVICE_CPU, {}}}, - {{ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU + "," + - ov::test::utils::DEVICE_GPU, - {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)}}, - {ov::test::utils::DEVICE_CPU, {}}, - {ov::test::utils::DEVICE_GPU, {}}}}; -}; - INSTANTIATE_TEST_SUITE_P(BehaviorTests, OVInferConsistencyTest, ::testing::Combine( ::testing::Values(10),// inferRequest num ::testing::Values(10),// infer counts ::testing::ValuesIn(configs())), OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(10),// inferRequest num - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoConfigs())), - OVInferConsistencyTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Auto_Bind_BehaviorTests, OVInferConsistencyTest, - ::testing::Combine( - ::testing::Values(0),// inferRequest num, will use optimal request number if set 0 - ::testing::Values(10),// infer counts - ::testing::ValuesIn(AutoBindConfigs())), - OVInferConsistencyTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 9b652de3073ceb..7f5402bfa5cc8e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -15,15 +15,6 @@ auto configs = []() { return std::vector{{}}; }; -auto AutoConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}, - {}}; -}; - -auto AutoNotSupportConfigs = []() { - 
return std::vector{}; -}; - std::shared_ptr getFunction1() { const std::vector inputShape = {1, 4, 20, 20}; const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; @@ -38,29 +29,6 @@ std::shared_ptr getFunction1() { return std::make_shared(relu, params, "SimpleActivation"); } -std::shared_ptr getFunction2() { - const std::vector inputShape = {1, 4, 20, 20}; - const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - params.front()->set_friendly_name("Param_1"); - params.front()->get_output_tensor(0).set_names({"input_tensor"}); - auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); - - auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); - auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add); - - auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); - auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto relu2 = std::make_shared(mult); - - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); - concat->get_output_tensor(0).set_names({"concat"}); - - return std::make_shared(concat, params, "SplitAddConcat"); -} - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, ::testing::Combine( ::testing::Values(getFunction1()), @@ -70,36 +38,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(configs())), OVInferRequestDynamicTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestDynamicTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferenceChaining::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChainingStatic, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferenceChainingStatic::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVNotSupportRequestDynamicTests, - ::testing::Combine( - ::testing::Values(getFunction2()), - ::testing::Values(std::vector, std::vector>>{ - {{1, 4, 20, 20}, {1, 2, 20, 40}}, - {{2, 4, 20, 20}, {2, 2, 20, 40}}}), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoNotSupportConfigs())), - OVInferRequestDynamicTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index c8d46afb2d66d0..7c2322192dd6cd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -22,14 +22,6 @@ 
auto configs = []() { }; }; -auto MultiConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - -auto AutoConfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. plain GPU) @@ -44,18 +36,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::ValuesIn(configs())), OVInferRequestIOTensorTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs())), - OVInferRequestIOTensorTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestIOTensorTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -103,20 +83,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision ::testing::ValuesIn(configs())), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs())), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, - ::testing::Combine( - ::testing::ValuesIn(prcs), - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(AutoConfigs())), - OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, ::testing::Combine( ::testing::ValuesIn(prcs), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index b33d1c535a30e9..7c17f4a296d21c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -18,10 +18,6 @@ auto configs = []() { }; }; -auto Multiconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -36,18 +32,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::ValuesIn(configs())), OVInferRequestMultithreadingTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestMultithreadingTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestMultithreadingTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index c460819194ac9b..b10d622fb56138 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -11,29 +11,6 @@ auto configs = []() { return std::vector{{}}; }; -auto Multiconfigs = []() { - return std::vector{ - {ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), ov::enable_profiling(true)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::intel_auto::device_bind_buffer(false)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::intel_auto::device_bind_buffer(true)} -#endif - }; -}; - -auto Autoconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}, -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT), - ov::intel_auto::device_bind_buffer(true)} -#endif - }; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -48,36 +25,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::ValuesIn(configs())), OVInferRequestPerfCountersTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestPerfCountersTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs())), - OVInferRequestPerfCountersTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), ::testing::ValuesIn(AutoBatchConfigs())), OVInferRequestPerfCountersTest::getTestCaseName); - -auto MulticonfigsTest = []() { - return std::vector{ -#ifdef ENABLE_INTEL_CPU - {ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU), - ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)} -#endif - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - OVInferRequestPerfCountersExceptionTest, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MulticonfigsTest())), - OVInferRequestPerfCountersExceptionTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index bde1f8f53735d0..f5918a29cb877a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -18,14 +18,6 @@ auto configs = []() { }; }; -auto Multiconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - -auto Autoconfigs = []() { - return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}; -}; - auto AutoBatchConfigs = []() { return std::vector{ // explicit batch size 4 to avoid fallback to no auto-batching (i.e. 
plain GPU) @@ -40,18 +32,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::ValuesIn(configs())), OVInferRequestWaitTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(Multiconfigs())), - OVInferRequestWaitTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(Autoconfigs())), - OVInferRequestWaitTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp index 5ee4e5a60c40d0..04869fee60ac93 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/caching_tests.cpp @@ -51,44 +51,6 @@ namespace { ::testing::Values(std::make_pair(ov::AnyMap{}, "blob"))), CompiledKernelsCacheTest::getTestCaseName); - auto autoConfigs = []() { - return std::vector>{ - std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}}, "blob"), - std::make_pair( - ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_GPU, ov::test::utils::DEVICE_CPU)}}, - "blob"), - std::make_pair( - ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU)}}, - "blob")}; - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_KernelCachingSupportCase_GPU, CompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - CompiledKernelsCacheTest::getTestCaseName); - - const std::vector LoadFromFileConfigs = { - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, - {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)} - }; - const std::vector TestTargets = - {ov::test::utils::DEVICE_AUTO, - ov::test::utils::DEVICE_MULTI, - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_GPU, CompileModelLoadFromFileTestBase, - ::testing::Combine( - ::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromFileTestBase::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase_GPU, - CompileModelLoadFromMemoryTestBase, - ::testing::Combine(::testing::ValuesIn(TestTargets), - ::testing::ValuesIn(LoadFromFileConfigs)), - CompileModelLoadFromMemoryTestBase::getTestCaseName); - const std::vector GPULoadFromFileConfigs = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp index 20126c977df938..5eeacde0093d87 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/life_time.cpp
@@ -11,23 +11,7 @@ namespace {
                         OVHoldersTest::getTestCaseName);
 
 INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTest,
-        ::testing::Values("AUTO:GPU",
-                          "MULTI:GPU",
-                          //ov::test::utils::DEVICE_BATCH,
+        ::testing::Values(//ov::test::utils::DEVICE_BATCH,
                           "HETERO:GPU"),
                         OVHoldersTest::getTestCaseName);
-
-const std::vector device_names_and_priorities = {
-    "MULTI:GPU", // GPU via MULTI,
-    "AUTO:GPU", // GPU via AUTO,
-#ifdef ENABLE_INTEL_CPU
-    "AUTO:GPU,CPU", // GPU+CPU
-    "AUTO:CPU,GPU", // CPU+GPU
-    "MULTI:GPU,CPU", // GPU+CPU
-    "MULTI:CPU,GPU", // CPU+GPU
-#endif
-};
-
 INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTestWithConfig,
-        ::testing::ValuesIn(device_names_and_priorities),
-        OVHoldersTestWithConfig::getTestCaseName);
 } // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
index 34707221f92eda..31c4d4884f05d1 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
@@ -35,49 +35,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
                                            ::testing::ValuesIn(gpu_properties)),
                          OVPropertiesTests::getTestCaseName);
 
-auto auto_multi_properties = []() {
-    return std::vector{
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-         ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-         ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::device_bind_buffer("YES")},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::device_bind_buffer("NO")},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::enable_startup_fallback("YES")},
-        {ov::device::priorities(ov::test::utils::DEVICE_GPU), ov::intel_auto::enable_startup_fallback("NO")}};
-};
-
-const std::vector multi_properties = {{ov::device::priorities("CPU", "GPU")},
-                                      {ov::device::priorities("CPU(1)", "GPU")},
-                                      {ov::device::priorities("CPU(1)", "GPU(2)")}};
-
-const std::vector auto_properties = {{ov::device::priorities("CPU", "GPU")},
-                                     {ov::device::priorities("-CPU", "GPU")},
-                                     {ov::device::priorities("CPU(1)", "GPU")},
-                                     {ov::device::priorities("CPU(1)", "GPU(2)")},
-                                     {ov::device::priorities("CPU", "-GPU")}};
-
-INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests,
-                         OVPropertiesTests,
-                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
-                                                              ov::test::utils::DEVICE_MULTI),
-                                            ::testing::ValuesIn(auto_multi_properties())),
-                         OVPropertiesTests::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests,
-                         OVPropertiesTests,
-                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
-                                            ::testing::ValuesIn(auto_properties)),
-                         OVPropertiesTests::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests,
-                         OVPropertiesTests,
-                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
-                                            ::testing::ValuesIn(multi_properties)),
-                         OVPropertiesTests::getTestCaseName);
-
 const std::vector gpu_setcore_properties = {
     {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
      ov::hint::num_requests(2),
@@ -94,50 +51,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_gpuCompileModelBehaviorTests,
                                            ::testing::ValuesIn(gpu_compileModel_properties)),
                          OVSetPropComplieModleGetPropTests::getTestCaseName);
 
-const std::vector multi_setcore_properties = {
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::HIGH)}};
-const std::vector multi_compileModel_properties = {
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::MEDIUM)}};
-
-INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests,
-                         OVSetPropComplieModleGetPropTests,
-                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
-                                            ::testing::ValuesIn(multi_setcore_properties),
-                                            ::testing::ValuesIn(multi_compileModel_properties)),
-                         OVSetPropComplieModleGetPropTests::getTestCaseName);
-
-const std::vector auto_setcore_properties = {
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::HIGH)},
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
-     ov::hint::model_priority(ov::hint::Priority::HIGH)},
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::HIGH)},
-};
-const std::vector auto_compileModel_properties = {
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
-     ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
-    {ov::device::priorities(ov::test::utils::DEVICE_GPU),
-     ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
-     ov::hint::model_priority(ov::hint::Priority::MEDIUM)}};
-INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests,
-                         OVSetPropComplieModleGetPropTests,
-                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
-                                            ::testing::ValuesIn(auto_setcore_properties),
-                                            ::testing::ValuesIn(auto_compileModel_properties)),
-                         OVSetPropComplieModleGetPropTests::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(nightly_OVClassCommon,
                          OVBasicPropertiesTestsP,
                          ::testing::Values(std::make_pair("openvino_intel_gpu_plugin", "GPU")));
@@ -146,9 +59,9 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVClassCommon,
 // // IE Class GetMetric // //
-INSTANTIATE_TEST_SUITE_P(nightly_MultiHeteroAutoBatchOVGetMetricPropsTest,
+INSTANTIATE_TEST_SUITE_P(nightly_HeteroAutoBatchOVGetMetricPropsTest,
                          OVGetMetricPropsTest,
-                         ::testing::Values("MULTI", "HETERO", "AUTO", "BATCH"));
+                         ::testing::Values("HETERO", "BATCH"));
 
 INSTANTIATE_TEST_SUITE_P(nightly_gpuOVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("GPU"));
 
@@ -156,14 +69,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVGetAvailableDevicesPropsTest,
                         OVGetAvailableDevicesPropsTest,
                         ::testing::Values("GPU"));
 
-INSTANTIATE_TEST_SUITE_P(
-    smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests,
-    OVCheckSetSupportedRWMetricsPropsTests,
-    ::testing::Combine(::testing::Values("MULTI:GPU", "AUTO:GPU"),
-                       ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues(
-                           {ov::hint::model_priority.name(), ov::log::level.name()}))),
-    OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName);
-
 INSTANTIATE_TEST_SUITE_P(
     smoke_OVCheckGetSupportedROMetricsPropsTests,
     OVCheckGetSupportedROMetricsPropsTests,
@@ -172,9 +77,9 @@ INSTANTIATE_TEST_SUITE_P(
                                { ov::device::uuid.name(), ov::device::luid.name(), ov::device::gops.name(), ov::device::type.name(), ov::device::full_name.name() }))),
     OVCheckGetSupportedROMetricsPropsTests::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(nightly_MultiHeteroAutoBatchOVCheckChangePropComplieModleGetPropTests_DEVICE_ID,
+INSTANTIATE_TEST_SUITE_P(nightly_HeteroAutoBatchOVCheckChangePropComplieModleGetPropTests_DEVICE_ID,
                          OVCheckChangePropComplieModleGetPropTests_DEVICE_ID,
-                         ::testing::Combine(::testing::Values("MULTI", "HETERO", "AUTO", "BATCH"),
+                         ::testing::Combine(::testing::Values("HETERO", "BATCH"),
                                             ::testing::Values(ov::AnyMap({}))),
                          OVCheckChangePropComplieModleGetPropTests_DEVICE_ID::getTestCaseName);
 
@@ -201,7 +106,7 @@ auto multiConfigs = []() {
 INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest,
                          OVClassSetDevicePriorityConfigPropsTest,
-                         ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"),
+                         ::testing::Combine(::testing::Values("HETERO"),
                                             ::testing::ValuesIn(multiConfigs())));
 // // GPU specific metrics
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp
index 274f501c7e1456..4ec0160690d4cb 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/remote.cpp
@@ -16,10 +16,6 @@ std::vector> generate_remote_params() {
     return {};
 }
 
-auto MultiConfigs = []() {
-    return std::vector{{ov::device::priorities(ov::test::utils::DEVICE_GPU)}};
-};
-
 auto AutoBatchConfigs = []() {
     return std::vector{
         // explicit batch size 4 to avoid fallback to no auto-batching (i.e.
plain GPU) @@ -36,14 +32,6 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_BehaviorTests, OVRemoteTest, ::testing::ValuesIn(generate_remote_params())), OVRemoteTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_Multi_BehaviorTests, OVRemoteTest, - ::testing::Combine( - ::testing::Values(ngraph::element::f32), - ::testing::Values(::ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(MultiConfigs()), - ::testing::ValuesIn(generate_remote_params())), - OVRemoteTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_AutoBatch_BehaviorTests, OVRemoteTest, ::testing::Combine( ::testing::Values(ngraph::element::f32), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp index 7a0da8fb9e339c..5f3e4120047a82 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp @@ -48,25 +48,4 @@ namespace { ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(std::make_pair(std::map(), "blob"))), LoadNetworkCompiledKernelsCacheTest::getTestCaseName); - - typedef std::map conftype; - auto autoConfigs = []() { - return std::vector>{ - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU}}, - "blob"), - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - (std::string(ov::test::utils::DEVICE_GPU) + "," + ov::test::utils::DEVICE_CPU)}}, - "blob"), - std::make_pair(conftype{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - (std::string(ov::test::utils::DEVICE_CPU) + "," + ov::test::utils::DEVICE_GPU)}}, - "blob")}; - }; - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_KernelCachingSupportCase_GPU, LoadNetworkCompiledKernelsCacheTest, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoConfigs())), - LoadNetworkCompiledKernelsCacheTest::getTestCaseName); - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp index 221c1fde058519..623246fafe0b1d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/configuration_tests.cpp @@ -30,63 +30,6 @@ namespace { {{InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}}; }; - auto multiinconfigs = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}}; - }; - - auto autoinconfigs = []() { - return std::vector>{ - 
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}}, - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, - ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}}}; - }; - auto auto_batch_inconfigs = []() { return std::vector>{ 
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), ov::test::utils::DEVICE_GPU}, @@ -110,19 +53,6 @@ namespace { ::testing::ValuesIn(inconfigs())), IncorrectConfigTests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs())), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigTests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs())), - IncorrectConfigTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, IncorrectConfigTests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -153,17 +83,6 @@ namespace { ::testing::ValuesIn(inconfigs())), IncorrectConfigAPITests::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(multiinconfigs())), - IncorrectConfigAPITests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, IncorrectConfigAPITests, - ::testing::Combine( - ::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(autoinconfigs())), - IncorrectConfigAPITests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, IncorrectConfigAPITests, ::testing::Combine( ::testing::Values(ov::test::utils::DEVICE_BATCH), @@ -190,41 +109,10 @@ namespace { {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}, }}; - auto auto_multi_prop_config = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, - InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}}; - }; - - auto auto_multi_loadNetWork_config = []() { - return std::vector>{ - {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}, - {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}, - {InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, - InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}}; - }; - INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, SetPropLoadNetWorkGetPropTests, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(gpu_prop_config), ::testing::ValuesIn(gpu_loadNetWork_config)), SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), - ::testing::ValuesIn(auto_multi_prop_config()), - ::testing::ValuesIn(auto_multi_loadNetWork_config())), - SetPropLoadNetWorkGetPropTests::getTestCaseName); - - INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, - SetPropLoadNetWorkGetPropTests, - ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), - ::testing::ValuesIn(auto_multi_prop_config()), - ::testing::ValuesIn(auto_multi_loadNetWork_config())), - SetPropLoadNetWorkGetPropTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp
index 437df754176eab..ff85685d245db7 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_integration.cpp
@@ -35,12 +35,12 @@ INSTANTIATE_TEST_SUITE_P(
 
 INSTANTIATE_TEST_SUITE_P(
         nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
-        ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH")
+        ::testing::Values("GPU", "HETERO", "BATCH")
 );
 
 INSTANTIATE_TEST_SUITE_P(
         nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
-        ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH")
+        ::testing::Values("GPU", "HETERO", "BATCH")
 );
 
 INSTANTIATE_TEST_SUITE_P(
@@ -50,7 +50,7 @@ INSTANTIATE_TEST_SUITE_P(
 
 INSTANTIATE_TEST_SUITE_P(
         nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
-        ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH")
+        ::testing::Values("GPU", "HETERO", "BATCH")
 );
 
 INSTANTIATE_TEST_SUITE_P(
@@ -80,12 +80,12 @@ INSTANTIATE_TEST_SUITE_P(
 
 INSTANTIATE_TEST_SUITE_P(
         nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
-        ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH")
+        ::testing::Values("GPU", "HETERO", "BATCH")
 );
 
 INSTANTIATE_TEST_SUITE_P(
         nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
-        ::testing::Values("GPU", "MULTI", "HETERO", "AUTO", "BATCH")
+        ::testing::Values("GPU", "HETERO", "BATCH")
 );
 
 INSTANTIATE_TEST_SUITE_P(
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp
index 8eba235a4731d4..2ffabe771b656f 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp
@@ -22,18 +22,6 @@ namespace {
         };
     };
 
-    auto multiConfigs = []() {
-        return std::vector>{
-            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU}}};
-    };
-
-    auto autoConfigs = []() {
-        return std::vector>{
-            {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_GPU},
-             {InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
-              ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}}};
-    };
-
     INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessTest,
                              ::testing::Combine(
                                      ::testing::ValuesIn(netPrecisions),
@@ -41,20 +29,6 @@ namespace {
                                      ::testing::ValuesIn(configs())),
                              InferRequestPreprocessTest::getTestCaseName);
 
-    INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessTest,
-                             ::testing::Combine(
-                                     ::testing::ValuesIn(netPrecisions),
-                                     ::testing::Values(ov::test::utils::DEVICE_MULTI),
-                                     ::testing::ValuesIn(multiConfigs())),
-                             InferRequestPreprocessTest::getTestCaseName);
-
-    INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessTest,
-                             ::testing::Combine(
-                                     ::testing::ValuesIn(netPrecisions),
-                                     ::testing::Values(ov::test::utils::DEVICE_AUTO),
-                                     ::testing::ValuesIn(autoConfigs())),
-                             InferRequestPreprocessTest::getTestCaseName);
-
     const std::vector ioPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::U8
diff --git
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp index 6f8755b10263f2..00fd89116bb52b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/version.cpp @@ -10,14 +10,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_GPU), VersionTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_MULTI), - VersionTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, VersionTest, - ::testing::Values(ov::test::utils::DEVICE_AUTO), - VersionTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, VersionTest, ::testing::Values(ov::test::utils::DEVICE_HETERO), VersionTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp deleted file mode 100644 index 1b8fdb69dc9e72..00000000000000 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/multi/gpu_remote_blob_tests.cpp +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "gpu/gpu_config.hpp" -#include "multi/multi_remote_blob_tests.hpp" -#include "multi/multi_remote_blob_multidevice_test.hpp" -#include "common_test_utils/test_constants.hpp" -#include - -using MultiDevice_Bind_oversubsciption_test = MultiDevice_Test; - -auto device_names_and_support_for_remote_blobs = []() { - return std::vector{ - {{GPU}, true, {}}, // GPU via MULTI, - {{"GPU.0"}, true, {}}, // GPU.0 via MULTI, - {{GPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU via MULTI, - {{"GPU.0"}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU.0 via MULTI, -#ifdef ENABLE_INTEL_CPU - {{GPU, CPU}, true, {}}, // GPU+CPU - {{CPU, GPU}, true, {}}, // CPU+GPU - {{GPU, CPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU+CPU -#endif - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobGPU, - MultiDevice_SupportTest, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs()), - MultiDevice_SupportTest::getTestCaseName); - -TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) { - InferenceEngine::CNNNetwork net(fn_ptr); - auto ie = PluginCache::get().ie(); - // load a network to the GPU to make sure we have a remote context - auto exec_net = ie->LoadNetwork(net, GPU); - auto ctx = exec_net.GetContext(); - - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - auto& first_input_name = inputInfo.begin()->first; - auto& first_input = inputInfo.begin()->second; - auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx); - rblob->allocate(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - InferenceEngine::ExecutableNetwork exec_net_multi; - try { - exec_net_multi = ie->LoadNetwork(net, device_names, configs); - } catch(...) { - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. 
env) issues not related to the test - return; - } - InferenceEngine::InferRequest req = exec_net_multi.CreateInferRequest(); - ASSERT_TRUE(req); - ASSERT_NO_THROW(req.SetBlob(first_input_name, rblob)); - ASSERT_NO_THROW(req.StartAsync()); - // cpu can consume remote buffer - auto exe_device = exec_net_multi.GetConfig("EXECUTION_DEVICES").as>(); - if (exe_device.size() == 1 && exe_device[0] == "CPU") - ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY)); - else - ASSERT_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY), InferenceEngine::Exception); -} - -TEST_P(MultiDevice_Bind_oversubsciption_test, oversubsciptionOfInferRequest) { - InferenceEngine::CNNNetwork net(fn_ptr); - auto ie = PluginCache::get().ie(); - // load a network to the GPU to make sure we have a remote context - auto exec_net = ie->LoadNetwork(net, GPU); - auto ctx = exec_net.GetContext(); - - const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo(); - auto& first_input = inputInfo.begin()->second; - auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx); - rblob->allocate(); - - std::map configs; - for (auto&& value : _properties) { - configs.emplace(value.first, value.second.as()); - } - - InferenceEngine::ExecutableNetwork exec_net_multi; - try { - exec_net_multi = ie->LoadNetwork(net, device_names, configs); - } catch(...) { - // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test - return; - } - - unsigned int optimalNum = 0; - try { - optimalNum = exec_net_multi.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); - } catch (...) { - std::cout << "ExecutableNetwork getMetric failed" << std::endl; - return; - } - - // test binder mode to throw exception when oversubsciption of infer requests - InferenceEngine::InferRequest req; - for (size_t i = 0; i < optimalNum; i++) { - req = exec_net_multi.CreateInferRequest(); - } - ASSERT_ANY_THROW(req = exec_net_multi.CreateInferRequest()); -} - -auto device_names_and_support_for_remote_blobs2 = []() { - return std::vector{ - // another GPU (the test will test its presence), different OCL contexts - // use GPU.0 as reference, expect auto to throw exception on other hardware contexts -#ifdef ENABLE_INTEL_CPU - //{{CPU}, {}}, // stand-alone CPU via MULTI (no GPU), no OCL context - {{"GPU.1", CPU}, - {ov::intel_auto::device_bind_buffer(true)}}, - {{"GPU.1", CPU}, - {ov::intel_auto::device_bind_buffer(false)}}, -#endif - {{"GPU.1"}, {}}, - {{"GPU.1"}, {ov::intel_auto::device_bind_buffer(true)}}, - }; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU, - MultiDevice_Test, - ::testing::ValuesIn(device_names_and_support_for_remote_blobs2()), - MultiDevice_Test::getTestCaseName); - -auto multi_bind_oversubsciption_test = []() { - return std::vector{{{GPU}, {ov::intel_auto::device_bind_buffer(true)}}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobOversubsciptionInferRequest, - MultiDevice_Bind_oversubsciption_test, - ::testing::ValuesIn(multi_bind_oversubsciption_test()), - MultiDevice_Test::getTestCaseName); - -auto multi_device_names_and_support_for_remote_blobs = []() { - return std::vector{ -#ifdef ENABLE_INTEL_CPU - {"GPU.0", CPU}, - {"GPU.0", "GPU.1", CPU}, // another GPU (the test will test its presence), different OCL contexts -#endif - {"GPU.0", "GPU.1"}}; -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU, - MultiDeviceMultipleGPU_Test, - 
::testing::ValuesIn(multi_device_names_and_support_for_remote_blobs()),
-                         MultiDeviceMultipleGPU_Test::getTestCaseName);
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
index 677cc9a2c217e4..798282680dbccd 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp
@@ -37,13 +37,9 @@ std::vector disabledTestPatterns() {
         // Not allowed dynamic loop tests on GPU
         R"(.*smoke_StaticShapeLoop_dynamic_exit.*)",
-        // Not expected behavior
-        R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)",
-        R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)",
         // TODO Issue 100145
         R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)",
         R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)",
-        R"(.*(Auto|Multi).*Behavior.*IncorrectConfigTests.*CanNotLoadNetworkWithIncorrectConfig.*)",
         // Not implemented yet:
         R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)",
         // TODO: Issue 67408
@@ -68,8 +64,6 @@ std::vector disabledTestPatterns() {
         R"(.*smoke.*BehaviorTests.*DynamicInputToDynamicOutput.*)",
         // Issue: 76197
         R"(.*registerPluginsXMLUnicodePath.*)",
-        // Issue: CVS-76980
-        R"(.*smoke_Auto_BehaviorTests.*InferDynamicNetwork/.*)",
         // Issue: CVS-88667 - Need to verify hetero interoperability
         R"(.*nightly_OVClassHeteroExecutableNetworlGetMetricTest.*SUPPORTED_(CONFIG_KEYS|METRICS).*)",
         // TODO: Issue: 89555
@@ -89,19 +83,13 @@ std::vector disabledTestPatterns() {
         R"(.*smoke_ConvolutionLayerGPUTest_dynamic1DSymPad.*)",
         // Looks like the test is targeting CPU plugin and doesn't respect that execution graph may vary from plugin to plugin
         R"(.*ExecGraphSerializationTest.*)",
-        // TODO: support getconfig in auto/multi CVS-104942
-        // TODO: move auto/multi cases to dedicated unit tests
-        R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)",
         // unsupported metrics
-        R"(.*nightly_MultiHeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)",
+        R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)",
         // Issue: 111437
         R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)",
         R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)",
         // Issue: 111440
         R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)",
-        // New plugin API doesn't support changes of pre-processing
-        R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)",
-        R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)",
         // New plugin work with tensors, so it means that blob in old API can have different pointers
         R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)",
         R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)",