From 511f06f9ba87e1f2cda39bd6b83029b7da00b8d0 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Mon, 4 Sep 2023 11:35:07 +0200 Subject: [PATCH 01/16] [DOCS] Fix for Install from Docker Image for master (#19505) * Update installing-openvino-docker-linux.md * Update installing-openvino-docker-linux.md * Update installing-openvino-docker-linux.md --- docs/install_guides/installing-openvino-docker-linux.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/docs/install_guides/installing-openvino-docker-linux.md b/docs/install_guides/installing-openvino-docker-linux.md index 0b200208db87d2..740e3b0371e9a8 100644 --- a/docs/install_guides/installing-openvino-docker-linux.md +++ b/docs/install_guides/installing-openvino-docker-linux.md @@ -22,13 +22,8 @@ You can get started easily with pre-built and published docker images, which are * `Red Hat Ecosystem Catalog (development image) `__ * `Azure Marketplace `__ -.. note:: - - OpenVINO development environment in a docker container is also available in `notebook repository `__ . It can be deployed in `OpenShift RedHat OpenData Science (RHODS) `__ - You can use the `available Dockerfiles on GitHub `__ or generate a Dockerfile with your settings via `DockerHub CI framework `__ , which can generate a Dockerfile, build, test, and deploy an image using the Intel® Distribution of OpenVINO™ toolkit. You can reuse available Dockerfiles, add your layer and customize the OpenVINO™ image to your needs. Docker CI repository includes guides on how to how to `get started with docker images `__ and how to use `OpenVINO™ Toolkit containers with GPU accelerators. `__ - To start using Dockerfiles the following conditions must be met: - Linux OS or Windows (under :ref:`Windows Subsystem for Linux (WSL2) `) @@ -37,11 +32,11 @@ To start using Dockerfiles the following conditions must be met: .. note:: - OpenVINO's `Docker `__ and `Bare Metal `__ distributions are identical, so the documentation applies to both. + OpenVINO's `Docker `__ and :doc:`Bare Metal ` distributions are identical, so the documentation applies to both. .. note:: - The OpenVINO development environment in a docker container is also available in the `notebook repository `__ . It can be implemented in `OpenShift RedHat OpenData Science (RHODS) `__. + OpenVINO development environment in a docker container is also available in the `notebook repository `__ . It can be implemented in `OpenShift RedHat OpenData Science (RHODS) `__. 
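For example, a minimal sketch of trying one of the published images, assuming the ``openvino/ubuntu20_dev`` tag is available on Docker Hub:

.. code:: sh

   # pull the published development image (tag name assumed; check Docker Hub for the current list)
   docker pull openvino/ubuntu20_dev:latest

   # start an interactive container with the OpenVINO environment already set up
   docker run -it --rm openvino/ubuntu20_dev:latest

   # to use an Intel GPU inside the container, pass the render device through
   docker run -it --rm --device /dev/dri openvino/ubuntu20_dev:latest
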
More information about Docker CI for Intel® Distribution of OpenVINO™ toolset can be found `here `__ From c46f6bf115277a8a16fd01ca98ee166e871a9d31 Mon Sep 17 00:00:00 2001 From: Mateusz Mikolajczyk Date: Mon, 4 Sep 2023 13:04:28 +0200 Subject: [PATCH 02/16] [PT FE] Add aten::swapaxes (#19483) * Add aten::swapaxes * Add comment * Improve swapaxes tests --- src/frontends/pytorch/src/op_table.cpp | 1 + .../pytorch_tests/test_transpose.py | 51 ++++++++++++------- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index a630ec185b0254..244b8fd804a1e1 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -427,6 +427,7 @@ const std::map get_supported_ops_ts() { {"aten::sub", op::translate_sub}, {"aten::sub_", op::inplace_op}, {"aten::sum", op::translate_sum}, + {"aten::swapaxes", op::quantizable_op}, {"aten::t", op::translate_t}, {"aten::t_", op::inplace_op}, {"aten::tan", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, diff --git a/tests/layer_tests/pytorch_tests/test_transpose.py b/tests/layer_tests/pytorch_tests/test_transpose.py index 6cd7fe5d4cda4f..9f9652053182cb 100644 --- a/tests/layer_tests/pytorch_tests/test_transpose.py +++ b/tests/layer_tests/pytorch_tests/test_transpose.py @@ -1,52 +1,63 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import numpy as np import pytest - +import torch from pytorch_layer_test_class import PytorchLayerTest class TestTranspose(PytorchLayerTest): def _prepare_input(self): - import numpy as np return (np.random.randn(2, 3, 4, 5).astype(np.float32),) - def create_model(self, dim0, dim1): - import torch + def create_model(self, dim0, dim1, op_type): + class swapaxes(torch.nn.Module): + def __init__(self, dim0, dim1): + super().__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.swapaxes(x, self.dim0, self.dim1) class aten_transpose(torch.nn.Module): - def __init__(self, dim0, dim1): + def __init__(self, dim0, dim1, op_type): super(aten_transpose, self).__init__() self.dim0 = dim0 self.dim1 = dim1 + op_types = {"transpose": self.forward_transpose, "swapaxes": self.forward_swapaxes} + self.swapaxes = swapaxes(dim0, dim1) + self.forward = op_types.get(op_type) - def forward(self, x): + def forward_transpose(self, x): return torch.transpose(x, self.dim0, self.dim1) + def forward_swapaxes(self, x: torch.Tensor) -> torch.Tensor: + # To reproduce aten::swapaxes in graph, swapaxes need to be in separate graph and tracing need to be used. 
+ return self.swapaxes(x) + ref_net = None - return aten_transpose(dim0, dim1), ref_net, "aten::transpose" + return aten_transpose(dim0, dim1, op_type), ref_net, f"aten::{op_type}" @pytest.mark.parametrize("dim0", [0, 1, 2, 3, -1, -2, -3, -4]) @pytest.mark.parametrize("dim1", [0, 1, 2, 3, -1, -2, -3, -4]) + @pytest.mark.parametrize("op_type", ["transpose", "swapaxes"]) @pytest.mark.nightly @pytest.mark.precommit - def test_transpose(self, dim0, dim1, ie_device, precision, ir_version): - self._test(*self.create_model(dim0, dim1), - ie_device, precision, ir_version) + def test_transpose(self, dim0, dim1, op_type, ie_device, precision, ir_version): + self._test(*self.create_model(dim0, dim1, op_type), ie_device, precision, ir_version, trace_model=True) class TestTSmall(PytorchLayerTest): def _prepare_input(self, num_dims=2, input_dtype="float32"): - import numpy as np shape = (2, 3) if num_dims == 0: - return (np.array(num_dims).astype(input_dtype), ) + return (np.array(num_dims).astype(input_dtype),) return (np.random.randn(*shape[:num_dims]).astype(input_dtype),) def create_model(self, num_dims=2, inplace=False): - import torch - class aten_transpose(torch.nn.Module): def __init__(self, inplace): super(aten_transpose, self).__init__() @@ -61,7 +72,7 @@ def forward_inplace(self, x): ref_net = None - return aten_transpose(inplace), ref_net, "aten::t" if not inplace else "aten::t_" + return aten_transpose(inplace), ref_net, "aten::t" if not inplace else "aten::t_" @pytest.mark.parametrize("num_dims", [0, 1, 2]) @pytest.mark.parametrize("input_dtype", ["float32", "int32"]) @@ -69,6 +80,10 @@ def forward_inplace(self, x): @pytest.mark.nightly @pytest.mark.precommit def test_t_small(self, num_dims, input_dtype, inplace, ie_device, precision, ir_version): - self._test(*self.create_model(num_dims, inplace), - ie_device, precision, ir_version, - kwargs_to_prepare_input={"num_dims": num_dims, "input_dtype": input_dtype}) + self._test( + *self.create_model(num_dims, inplace), + ie_device, + precision, + ir_version, + kwargs_to_prepare_input={"num_dims": num_dims, "input_dtype": input_dtype}, + ) From bd0c156a70693ac6f7b10fa64d10d8d6aa532478 Mon Sep 17 00:00:00 2001 From: Mateusz Tabaka Date: Mon, 4 Sep 2023 13:58:53 +0200 Subject: [PATCH 03/16] PullReshapeThroughReduce - skip transformation if Reshape doesn't unsqueeze input (#19477) Ticket: CVS-118905 --- .../common_optimizations/pull_through_reduce.cpp | 3 +++ .../tests/common_optimizations/pull_through_reduce_test.cpp | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/src/common/transformations/src/transformations/common_optimizations/pull_through_reduce.cpp b/src/common/transformations/src/transformations/common_optimizations/pull_through_reduce.cpp index 8f16025a1acf29..0ceac5ae44a012 100644 --- a/src/common/transformations/src/transformations/common_optimizations/pull_through_reduce.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/pull_through_reduce.cpp @@ -55,6 +55,9 @@ const std::vector adjust_axes(const std::vector& axes_to_align // - Reshape(input_shape={5,10,15}, target_shape={5,10,1,15}), 2 axis is returned std::vector try_get_unsqueeze_axes_from_reshape(const ov::Shape& target_shape, const ov::Shape& input_shape) { std::vector result; + if (target_shape.size() <= input_shape.size()) { + return result; + } if (input_shape.size() == 0) { // scalar case - can be reshaped only to [1,..,1] shape result.resize(target_shape.size(), 0); std::iota(std::begin(result), std::end(result), 0); diff --git 
a/src/common/transformations/tests/common_optimizations/pull_through_reduce_test.cpp b/src/common/transformations/tests/common_optimizations/pull_through_reduce_test.cpp index a516e966090e41..fb1689f6bbc07b 100644 --- a/src/common/transformations/tests/common_optimizations/pull_through_reduce_test.cpp +++ b/src/common/transformations/tests/common_optimizations/pull_through_reduce_test.cpp @@ -360,6 +360,11 @@ TEST_F(TransformationTestsF, PullReshapeThroughReduceSkipIfTheSameAxesScalarCase manager.register_pass(); } +TEST_F(TransformationTestsF, PullReshapeThroughReduceSkipIfReshapeDoesntUnsqueeze) { + model = generate_reshape_model(element::f32, {1, 100, 1}, {1, 1, 100}, {2}); + manager.register_pass(); +} + TEST_F(TransformationTestsF, PullReshapeThroughReduceSkipIfNonConstAxes) { const auto input = std::make_shared(element::f32, PartialShape{5, 10, 15}); const auto target_shape = Constant::create(element::i64, Shape{4}, {1, 5, 10, 15}); From 8f4d72826a9e5c102510608941da908b05b0efa9 Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Mon, 4 Sep 2023 13:52:23 +0100 Subject: [PATCH 04/16] [apiConformance] Fix double numbers in results after merge xml (#19564) --- .../layer_tests_summary/merge_xmls.py | 46 ++++++++++++------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py index bea7dc09d58877..c26bf8cd42b309 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py @@ -76,28 +76,40 @@ def aggregate_test_results(aggregated_results: SubElement, xml_reports: list, xml_device_entry = ET.fromstring(new_data) device_name = xml_device_entry.tag aggregated_device_results = aggregated_results.find(device_name) + # example: ov_plugin or Add-1 for xml_results_entry in xml_device_entry: - aggregated_results_entry = None - if not aggregated_device_results is None: - aggregated_results_entry = aggregated_device_results.find(xml_results_entry.tag) - if aggregated_results_entry is None: - stat_update_utils.update_rel_values(xml_results_entry) + if report_type == OP_CONFORMANCE or report_type == OP_CONFORMANCE.lower(): + aggregated_results_entry = None + if not aggregated_device_results is None: + aggregated_results_entry = aggregated_device_results.find(xml_results_entry.tag) + if aggregated_results_entry is None: + stat_update_utils.update_rel_values(xml_results_entry) + if aggregated_device_results is None: + aggregated_results.append(xml_device_entry) + aggregated_device_results = aggregated_results.find(device_name) + else: + aggregated_device_results.append(xml_results_entry) + continue + update_result_node(xml_results_entry, aggregated_results_entry) + else: + aggregated_results_entry = None if aggregated_device_results is None: aggregated_results.append(xml_device_entry) - aggregated_device_results = aggregated_results.find(device_name) + break + else: + aggregated_results_entry = aggregated_device_results.find(xml_results_entry.tag) + if aggregated_results_entry: + for xml_real_device_entry in xml_results_entry: + aggregated_real_device_api_report = None + aggregated_real_device_api_report = aggregated_results_entry.find(xml_real_device_entry.tag) + if aggregated_real_device_api_report is None: + stat_update_utils.update_rel_values(xml_results_entry) + aggregated_results_entry.append(xml_real_device_entry) + continue 
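+                            # the per-API entry already exists in the aggregate, so merge this run's results into it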
+ update_result_node(xml_real_device_entry, aggregated_real_device_api_report) else: aggregated_device_results.append(xml_results_entry) - continue - if report_type == OP_CONFORMANCE or report_type == OP_CONFORMANCE.lower(): - update_result_node(xml_results_entry, aggregated_results_entry) - else: - for xml_real_device_entry in xml_results_entry: - aggregated_real_device_api_report = aggregated_results_entry.find(xml_real_device_entry.tag) - if aggregated_real_device_api_report is None: - stat_update_utils.update_rel_values(xml_results_entry) - aggregated_results_entry.append(xml_real_device_entry) - continue - update_result_node(xml_real_device_entry, aggregated_real_device_api_report) + return aggregated_timestamp From 2d760ba1bf656655133a8242b36f5fc4e251a3f7 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Mon, 4 Sep 2023 14:57:35 +0200 Subject: [PATCH 05/16] Adding Quantizing with Accuracy Control using NNCF notebook (#19585) --- .../notebooks/001-hello-world-with-output.rst | 6 +- .../003-hello-segmentation-with-output.rst | 6 +- .../004-hello-detection-with-output.rst | 6 +- ...classification-to-openvino-with-output.rst | 8 +- ...2-pytorch-onnx-to-openvino-with-output.rst | 6 +- .../102-pytorch-to-openvino-with-output.rst | 6 +- ...to-openvino-classification-with-output.rst | 6 +- .../notebooks/104-model-tools-with-output.rst | 4 +- ...105-language-quantize-bert-with-output.rst | 6 +- .../notebooks/106-auto-device-with-output.rst | 6 +- ...tion-quantization-data2vec-with-output.rst | 6 +- docs/notebooks/108-gpu-device-with-output.rst | 2 + .../109-latency-tricks-with-output.rst | 6 +- .../109-throughput-tricks-with-output.rst | 6 +- ...110-ct-scan-live-inference-with-output.rst | 6 +- ...segmentation-quantize-nncf-with-output.rst | 6 +- ...ov5-quantization-migration-with-output.rst | 5 +- ...training-quantization-nncf-with-output.rst | 5 +- ...lassification-quantization-with-output.rst | 4 +- docs/notebooks/115-async-api-with-output.rst | 4 +- .../116-sparsity-optimization-with-output.rst | 4 +- .../117-model-server-with-output.rst | 4 +- ...118-optimize-preprocessing-with-output.rst | 4 +- .../119-tflite-to-openvino-with-output.rst | 4 +- ...ject-detection-to-openvino-with-output.rst | 4 +- .../121-convert-to-openvino-with-output.rst | 2 + ...tion-quantization-wav2vec2-with-output.rst | 309 ++++++++++++++++++ ...tion-with-accuracy-control-with-output.rst | 306 +++++++++++++++++ .../201-vision-monodepth-with-output.rst | 4 +- ...sion-superresolution-image-with-output.rst | 4 +- ...sion-superresolution-video-with-output.rst | 4 +- .../203-meter-reader-with-output.rst | 4 +- ...nter-semantic-segmentation-with-output.rst | 4 +- ...-vision-background-removal-with-output.rst | 4 +- ...206-vision-paddlegan-anime-with-output.rst | 4 +- ...-paddlegan-superresolution-with-output.rst | 4 +- ...ical-character-recognition-with-output.rst | 4 +- .../209-handwritten-ocr-with-output.rst | 4 +- ...slowfast-video-recognition-with-output.rst | 4 +- .../211-speech-to-text-with-output.rst | 4 +- ...annote-speaker-diarization-with-output.rst | 4 +- .../213-question-answering-with-output.rst | 4 +- .../214-grammar-correction-with-output.rst | 4 +- .../215-image-inpainting-with-output.rst | 4 +- .../216-attention-center-with-output.rst | 4 +- .../217-vision-deblur-with-output.rst | 2 + ...-detection-and-recognition-with-output.rst | 4 +- ...219-knowledge-graphs-conve-with-output.rst | 4 +- ...ss-lingual-books-alignment-with-output.rst | 4 +- .../221-machine-translation-with-output.rst | 4 +- 
...-vision-image-colorization-with-output.rst | 4 +- .../223-text-prediction-with-output.rst | 4 +- ...-segmentation-point-clouds-with-output.rst | 4 +- ...le-diffusion-text-to-image-with-output.rst | 4 +- .../226-yolov7-optimization-with-output.rst | 4 +- ...isper-subtitles-generation-with-output.rst | 4 +- ...228-clip-zero-shot-convert-with-output.rst | 4 +- ...28-clip-zero-shot-quantize-with-output.rst | 4 +- ...rt-sequence-classification-with-output.rst | 4 +- .../230-yolov8-optimization-with-output.rst | 4 +- ...ruct-pix2pix-image-editing-with-output.rst | 4 +- ...visual-language-processing-with-output.rst | 4 +- ...-encodec-audio-compression-with-output.rst | 4 +- ...ontrolnet-stable-diffusion-with-output.rst | 4 +- ...diffusion-v2-infinite-zoom-with-output.rst | 4 +- ...v2-optimum-demo-comparison-with-output.rst | 4 +- ...-diffusion-v2-optimum-demo-with-output.rst | 4 +- ...sion-v2-text-to-image-demo-with-output.rst | 4 +- ...diffusion-v2-text-to-image-with-output.rst | 4 +- .../237-segment-anything-with-output.rst | 2 + .../238-deep-floyd-if-with-output.rst | 6 +- .../239-image-bind-convert-with-output.rst | 4 +- ...ly-2-instruction-following-with-output.rst | 4 +- ...41-riffusion-text-to-music-with-output.rst | 4 +- ...42-freevc-voice-conversion-with-output.rst | 4 +- ...tflite-selfie-segmentation-with-output.rst | 4 +- ...4-named-entity-recognition-with-output.rst | 4 +- .../248-stable-diffusion-xl-with-output.rst | 4 +- .../250-music-generation-with-output.rst | 4 +- ...1-tiny-sd-image-generation-with-output.rst | 6 +- ...tcomposer-image-generation-with-output.rst | 4 +- .../253-zeroscope-text2video-with-output.rst | 4 +- ...low-training-openvino-nncf-with-output.rst | 2 + ...nsorflow-training-openvino-with-output.rst | 2 + ...uantization-aware-training-with-output.rst | 4 +- ...uantization-aware-training-with-output.rst | 4 +- .../401-object-detection-with-output.rst | 4 +- .../402-pose-estimation-with-output.rst | 4 +- ...-action-recognition-webcam-with-output.rst | 4 +- .../404-style-transfer-with-output.rst | 4 +- .../405-paddle-ocr-webcam-with-output.rst | 4 +- .../406-3D-pose-estimation-with-output.rst | 4 +- .../407-person-tracking-with-output.rst | 4 +- docs/tutorials.md | 9 + 94 files changed, 917 insertions(+), 95 deletions(-) create mode 100644 docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst create mode 100644 docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst diff --git a/docs/notebooks/001-hello-world-with-output.rst b/docs/notebooks/001-hello-world-with-output.rst index 1b8752b43cfa56..b5fc484fc044c1 100644 --- a/docs/notebooks/001-hello-world-with-output.rst +++ b/docs/notebooks/001-hello-world-with-output.rst @@ -1,7 +1,7 @@ Hello Image Classification ========================== -.. _top: + This basic introduction to OpenVINO™ shows how to do inference with an image classification model. @@ -15,6 +15,10 @@ created, refer to the `TensorFlow to OpenVINO <101-tensorflow-classification-to-openvino-with-output.html>`__ tutorial. + + +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/003-hello-segmentation-with-output.rst b/docs/notebooks/003-hello-segmentation-with-output.rst index 664854ae8d30ec..de8e1d16974252 100644 --- a/docs/notebooks/003-hello-segmentation-with-output.rst +++ b/docs/notebooks/003-hello-segmentation-with-output.rst @@ -1,7 +1,7 @@ Hello Image Segmentation ======================== -.. 
_top: + A very basic introduction to using segmentation models with OpenVINO™. @@ -12,6 +12,10 @@ Zoo `__ is used. ADAS stands for Advanced Driver Assistance Services. The model recognizes four classes: background, road, curb and mark. + + +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/004-hello-detection-with-output.rst b/docs/notebooks/004-hello-detection-with-output.rst index 35d47be09d1ded..8a96d8e68f06f6 100644 --- a/docs/notebooks/004-hello-detection-with-output.rst +++ b/docs/notebooks/004-hello-detection-with-output.rst @@ -1,7 +1,7 @@ Hello Object Detection ====================== -.. _top: + A very basic introduction to using object detection models with OpenVINO™. @@ -18,6 +18,10 @@ corner, ``(x_max, y_max)`` are the coordinates of the bottom right bounding box corner and ``conf`` is the confidence for the predicted class. + + +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst index 8e6b721fdbd6af..6971a252d7d530 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst @@ -1,7 +1,7 @@ Convert a TensorFlow Model to OpenVINO™ ======================================= -.. _top: + | This short tutorial shows how to convert a TensorFlow `MobileNetV3 `__ @@ -13,7 +13,11 @@ Convert a TensorFlow Model to OpenVINO™ Runtime `__ and do inference with a sample image. -| **Table of contents**: + + +| .. _top: + +**Table of contents**: - `Imports <#imports>`__ - `Settings <#settings>`__ diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index c310e83f56d4f0..4ff0c24ecd7c2a 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -1,7 +1,7 @@ Convert a PyTorch Model to ONNX and OpenVINO™ IR ================================================ -.. _top: + This tutorial demonstrates step-by-step instructions on how to do inference on a PyTorch semantic segmentation model, using OpenVINO @@ -35,6 +35,10 @@ plant, sheep, sofa, train, tv monitor** More information about the model is available in the `torchvision documentation `__ + + +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/102-pytorch-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-to-openvino-with-output.rst index cf6e83887ca1f9..be0a9038b08091 100644 --- a/docs/notebooks/102-pytorch-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-to-openvino-with-output.rst @@ -1,7 +1,7 @@ Convert a PyTorch Model to OpenVINO™ IR ======================================= -.. _top: + This tutorial demonstrates step-by-step instructions on how to do inference on a PyTorch classification model using OpenVINO Runtime. @@ -31,6 +31,10 @@ but elevated to the design space level. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. + + +.. 
_top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst index 082be1d66439ef..94f284cf6741ed 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst @@ -1,7 +1,7 @@ Convert a PaddlePaddle Model to OpenVINO™ IR ============================================ -.. _top: + This notebook shows how to convert a MobileNetV3 model from `PaddleHub `__, pre-trained @@ -16,6 +16,10 @@ IR model. Source of the `model `__. + + +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/104-model-tools-with-output.rst b/docs/notebooks/104-model-tools-with-output.rst index 62dcd3132ea343..441028017b47b5 100644 --- a/docs/notebooks/104-model-tools-with-output.rst +++ b/docs/notebooks/104-model-tools-with-output.rst @@ -1,13 +1,15 @@ Working with Open Model Zoo Models ================================== -.. _top: + This tutorial shows how to download a model from `Open Model Zoo `__, convert it to OpenVINO™ IR format, show information about the model, and benchmark the model. +.. _top: + **Table of contents**: - `OpenVINO and Open Model Zoo Tools <#openvino-and-open-model-zoo-tools>`__ diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index cbd1ec2b557456..c7cdfb210868e0 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -1,7 +1,7 @@ Quantize NLP models with Post-Training Quantization ​in NNCF ============================================================ -.. _top: + This tutorial demonstrates how to apply ``INT8`` quantization to the Natural Language Processing model known as @@ -24,6 +24,10 @@ and datasets. It consists of the following steps: - Compare the performance of the original, converted and quantized models. + + +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/106-auto-device-with-output.rst b/docs/notebooks/106-auto-device-with-output.rst index 3e51a92ee2eb82..98166495d23c74 100644 --- a/docs/notebooks/106-auto-device-with-output.rst +++ b/docs/notebooks/106-auto-device-with-output.rst @@ -1,8 +1,6 @@ Automatic Device Selection with OpenVINO™ ========================================= -.. _top: - The `Auto device `__ (or AUTO in short) selects the most suitable device for inference by @@ -32,6 +30,10 @@ first inference. auto + + +.. _top: + **Table of contents**: - `Import modules and create Core <#import-modules-and-create-core>`__ diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 8b1b221b0aa470..39cf07b445206f 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -1,8 +1,6 @@ Quantize Speech Recognition Models using NNCF PTQ API ===================================================== -.. 
_top: - This tutorial demonstrates how to use the NNCF (Neural Network Compression Framework) 8-bit quantization in post-training mode (without the fine-tuning pipeline) to optimize the speech recognition model, @@ -21,6 +19,10 @@ steps: - Compare performance of the original and quantized models. - Compare Accuracy of the Original and Quantized Models. + + +.. _top: + **Table of contents**: - `Download and prepare model <#download-and-prepare-model>`__ diff --git a/docs/notebooks/108-gpu-device-with-output.rst b/docs/notebooks/108-gpu-device-with-output.rst index 78eec1cf09b926..9d7f69faec7efe 100644 --- a/docs/notebooks/108-gpu-device-with-output.rst +++ b/docs/notebooks/108-gpu-device-with-output.rst @@ -1,6 +1,8 @@ Working with GPUs in OpenVINO™ ============================== + + .. _top: **Table of contents**: diff --git a/docs/notebooks/109-latency-tricks-with-output.rst b/docs/notebooks/109-latency-tricks-with-output.rst index f939f5e5d4afac..5d2d14fa85d4c4 100644 --- a/docs/notebooks/109-latency-tricks-with-output.rst +++ b/docs/notebooks/109-latency-tricks-with-output.rst @@ -1,8 +1,6 @@ Performance tricks in OpenVINO for latency mode =============================================== -.. _top: - The goal of this notebook is to provide a step-by-step tutorial for improving performance for inferencing in a latency mode. Low latency is especially desired in real-time applications when the results are needed @@ -51,6 +49,10 @@ optimize performance on OpenVINO IR files in A similar notebook focused on the throughput mode is available `here <109-throughput-tricks-with-output.html>`__. + + +.. _top: + **Table of contents**: - `Data <#data>`__ diff --git a/docs/notebooks/109-throughput-tricks-with-output.rst b/docs/notebooks/109-throughput-tricks-with-output.rst index d01b7d3f3dcfb1..c5e7a2c9646629 100644 --- a/docs/notebooks/109-throughput-tricks-with-output.rst +++ b/docs/notebooks/109-throughput-tricks-with-output.rst @@ -1,7 +1,7 @@ Performance tricks in OpenVINO for throughput mode ================================================== -.. _top: + The goal of this notebook is to provide a step-by-step tutorial for improving performance for inferencing in a throughput mode. High @@ -46,6 +46,10 @@ optimize performance on OpenVINO IR files in A similar notebook focused on the latency mode is available `here <109-latency-tricks-with-output.html>`__. + + +.. _top: + **Table of contents**: - `Data <#data>`__ diff --git a/docs/notebooks/110-ct-scan-live-inference-with-output.rst b/docs/notebooks/110-ct-scan-live-inference-with-output.rst index 7d543aa06d8e11..0f3e10cca74df6 100644 --- a/docs/notebooks/110-ct-scan-live-inference-with-output.rst +++ b/docs/notebooks/110-ct-scan-live-inference-with-output.rst @@ -1,8 +1,6 @@ Live Inference and Benchmark CT-scan Data with OpenVINO™ ======================================================== -.. _top: - Kidney Segmentation with PyTorch Lightning and OpenVINO™ - Part 4 ----------------------------------------------------------------- @@ -30,6 +28,10 @@ notebook. For demonstration purposes, this tutorial will download one converted CT scan to use for inference. + + +.. 
_top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst index 2ff15e5eed48d3..b7089acadd661b 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst @@ -1,8 +1,6 @@ Quantize a Segmentation Model and Show Live Inference ===================================================== -.. _top: - Kidney Segmentation with PyTorch Lightning and OpenVINO™ - Part 3 ----------------------------------------------------------------- @@ -55,6 +53,10 @@ demonstration purposes, this tutorial will download one converted CT scan and use that scan for quantization and inference. For production purposes, use a representative dataset for quantizing the model. + + +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst index 230ace7db8c8d6..6181e22d00014e 100644 --- a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst +++ b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst @@ -1,8 +1,6 @@ Migrate quantization from POT API to NNCF API ============================================= -.. _top: - This tutorial demonstrates how to migrate quantization pipeline written using the OpenVINO `Post-Training Optimization Tool (POT) `__ to `NNCF Post-Training Quantization API `__. @@ -23,6 +21,9 @@ The tutorial consists from the following parts: 7. Compare performance FP32 and INT8 models + +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst index 16c64286c2bd6d..69d0e04db139f0 100644 --- a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst +++ b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst @@ -1,8 +1,6 @@ Post-Training Quantization of PyTorch models with NNCF ====================================================== -.. _top: - The goal of this tutorial is to demonstrate how to use the NNCF (Neural Network Compression Framework) 8-bit quantization in post-training mode (without the fine-tuning pipeline) to optimize a PyTorch model for the @@ -27,6 +25,9 @@ quantization, not demanding the fine-tuning of the model. notebook. + +.. _top: + **Table of contents**: - `Preparations <#preparations>`__ diff --git a/docs/notebooks/113-image-classification-quantization-with-output.rst b/docs/notebooks/113-image-classification-quantization-with-output.rst index d72f5e3e4c061f..15e6e52b6f598a 100644 --- a/docs/notebooks/113-image-classification-quantization-with-output.rst +++ b/docs/notebooks/113-image-classification-quantization-with-output.rst @@ -1,7 +1,7 @@ Quantization of Image Classification Models =========================================== -.. _top: + This tutorial demonstrates how to apply ``INT8`` quantization to Image Classification model using @@ -21,6 +21,8 @@ This tutorial consists of the following steps: - Compare performance of the original and quantized models. - Compare results on one picture. +.. 
_top: + **Table of contents**: - `Prepare the Model <#prepare-the-model>`__ diff --git a/docs/notebooks/115-async-api-with-output.rst b/docs/notebooks/115-async-api-with-output.rst index 9f59cbc78b271b..bec3bc9e219d8d 100644 --- a/docs/notebooks/115-async-api-with-output.rst +++ b/docs/notebooks/115-async-api-with-output.rst @@ -1,7 +1,7 @@ Asynchronous Inference with OpenVINO™ ===================================== -.. _top: + This notebook demonstrates how to use the `Async API `__ @@ -14,6 +14,8 @@ in parallel (for example, populating inputs or scheduling other requests) rather than wait for the current inference to complete first. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/116-sparsity-optimization-with-output.rst b/docs/notebooks/116-sparsity-optimization-with-output.rst index aa321a6b57e442..532094888deafe 100644 --- a/docs/notebooks/116-sparsity-optimization-with-output.rst +++ b/docs/notebooks/116-sparsity-optimization-with-output.rst @@ -1,7 +1,7 @@ Accelerate Inference of Sparse Transformer Models with OpenVINO™ and 4th Gen Intel® Xeon® Scalable Processors ============================================================================================================= -.. _top: + This tutorial demonstrates how to improve performance of sparse Transformer models with `OpenVINO `__ on 4th @@ -21,6 +21,8 @@ consists of the following steps: integration with Hugging Face Optimum. - Compare sparse 8-bit vs. dense 8-bit inference performance. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/117-model-server-with-output.rst b/docs/notebooks/117-model-server-with-output.rst index 54989d2a0e7023..7cf130e876b78c 100644 --- a/docs/notebooks/117-model-server-with-output.rst +++ b/docs/notebooks/117-model-server-with-output.rst @@ -1,7 +1,7 @@ Hello Model Server ================== -.. _top: + Introduction to OpenVINO™ Model Server (OVMS). @@ -33,6 +33,8 @@ deployment: |ovms_diagram| +.. _top: + **Table of contents**: - `Serving with OpenVINO Model Server <#serving-with-openvino-model-server1>`__ diff --git a/docs/notebooks/118-optimize-preprocessing-with-output.rst b/docs/notebooks/118-optimize-preprocessing-with-output.rst index c76a89861375f1..e9f19e107c96a3 100644 --- a/docs/notebooks/118-optimize-preprocessing-with-output.rst +++ b/docs/notebooks/118-optimize-preprocessing-with-output.rst @@ -1,7 +1,7 @@ Optimize Preprocessing ====================== -.. _top: + When input data does not fit the model input tensor perfectly, additional operations/steps are needed to transform the data to the @@ -27,6 +27,8 @@ This tutorial include following steps: - Comparing results on one picture. - Comparing performance. +.. _top: + **Table of contents**: - `Settings <#settings>`__ diff --git a/docs/notebooks/119-tflite-to-openvino-with-output.rst b/docs/notebooks/119-tflite-to-openvino-with-output.rst index aa0bc8713a3973..6bf4b8924cc08e 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output.rst +++ b/docs/notebooks/119-tflite-to-openvino-with-output.rst @@ -1,7 +1,7 @@ Convert a Tensorflow Lite Model to OpenVINO™ ============================================ -.. _top: + `TensorFlow Lite `__, often referred to as TFLite, is an open source library developed for deploying @@ -17,6 +17,8 @@ After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ and do inference with a sample image. +.. 
_top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst index 39fcef5cec8fe6..9e2ee53134913b 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst @@ -1,7 +1,7 @@ Convert a TensorFlow Object Detection Model to OpenVINO™ ======================================================== -.. _top: + `TensorFlow `__, or TF for short, is an open-source framework for machine learning. @@ -26,6 +26,8 @@ After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ and do inference with a sample image. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/121-convert-to-openvino-with-output.rst b/docs/notebooks/121-convert-to-openvino-with-output.rst index 5da2d317e3afad..cf93b94ac741a3 100644 --- a/docs/notebooks/121-convert-to-openvino-with-output.rst +++ b/docs/notebooks/121-convert-to-openvino-with-output.rst @@ -4,6 +4,8 @@ OpenVINO™ model conversion API This notebook shows how to convert a model from original framework format to OpenVINO Intermediate Representation (IR). +.. _top: + **Table of contents**: - `OpenVINO IR format <#openvino-ir-format>`__ diff --git a/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst b/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst new file mode 100644 index 00000000000000..4db1ac32fe921f --- /dev/null +++ b/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst @@ -0,0 +1,309 @@ +Quantize Speech Recognition Models with accuracy control using NNCF PTQ API +=========================================================================== + + + +This tutorial demonstrates how to apply ``INT8`` quantization with +accuracy control to the speech recognition model, known as +`Wav2Vec2 `__, +using the NNCF (Neural Network Compression Framework) 8-bit quantization +with accuracy control in post-training mode (without the fine-tuning +pipeline). This notebook uses a fine-tuned +`Wav2Vec2-Base-960h `__ +`PyTorch `__ model trained on the `LibriSpeech ASR +corpus `__. The tutorial is designed to be +extendable to custom models and datasets. It consists of the following +steps: + +- Download and prepare the Wav2Vec2 model and LibriSpeech dataset. +- Define data loading and accuracy validation functionality. +- Model quantization with accuracy control. +- Compare Accuracy of original PyTorch model, OpenVINO FP16 and INT8 + models. +- Compare performance of the original and quantized models. + +The advanced quantization flow allows to apply 8-bit quantization to the +model with control of accuracy metric. This is achieved by keeping the +most impactful operations within the model in the original precision. +The flow is based on the `Basic 8-bit +quantization `__ +and has the following differences: + +- Besides the calibration dataset, a validation dataset is required to + compute the accuracy metric. Both datasets can refer to the same data + in the simplest case. +- Validation function, used to compute accuracy metric is required. It + can be a function that is already available in the source framework + or a custom function. 
+- Since accuracy validation is run several times during the + quantization process, quantization with accuracy control can take + more time than the Basic 8-bit quantization flow. +- The resulted model can provide smaller performance improvement than + the Basic 8-bit quantization flow because some of the operations are + kept in the original precision. + +.. note:: + + Currently, 8-bit quantization with accuracy control in NNCF + is available only for models in OpenVINO representation. + +The steps for the quantization with accuracy control are described +below. + + + +.. _top: + +**Table of contents**: + +- `Imports <#imports>`__ +- `Prepare the Model <#prepare-the-model>`__ +- `Prepare LibriSpeech Dataset <#prepare-librispeech-dataset>`__ +- `Prepare calibration and validation datasets <#prepare-calibration-and-validation-datasets>`__ +- `Prepare validation function <#prepare-validation-function>`__ +- `Run quantization with accuracy control <#run-quantization-with-accuracy-control>`__ +- `Model Usage Example <#model-usage-example>`__ +- `Compare Accuracy of the Original and Quantized Models <#compare-accuracy-of-the-original-and-quantized-models>`__ + + +.. code:: ipython2 + + # !pip install -q "openvino-dev>=2023.1.0" "nncf>=2.6.0" + !pip install -q "openvino==2023.1.0.dev20230811" + !pip install git+https://github.com/openvinotoolkit/nncf.git@develop + !pip install -q soundfile librosa transformers torch datasets torchmetrics + +Imports `⇑ <#top>`__ +############################################################################################################################### + +.. code:: ipython2 + + import numpy as np + import torch + + from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor + +Prepare the Model `⇑ <#top>`__ +############################################################################################################################### + +For instantiating PyTorch model class, +we should use ``Wav2Vec2ForCTC.from_pretrained`` method with providing +model ID for downloading from HuggingFace hub. Model weights and +configuration files will be downloaded automatically in first time +usage. Keep in mind that downloading the files can take several minutes +and depends on your internet connection. + +Additionally, we can create processor class which is responsible for +model specific pre- and post-processing steps. + +.. code:: ipython2 + + BATCH_SIZE = 1 + MAX_SEQ_LENGTH = 30480 + + + torch_model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h", ctc_loss_reduction="mean") + processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") + +Convert it to the OpenVINO Intermediate Representation (OpenVINO IR) + +.. code:: ipython2 + + import openvino + + + default_input = torch.zeros([1, MAX_SEQ_LENGTH], dtype=torch.float) + ov_model = openvino.convert_model(torch_model, example_input=default_input) + +Prepare LibriSpeech Dataset `⇑ <#top>`__ +############################################################################################################################### + +For demonstration purposes, we will use short dummy version of +LibriSpeech dataset - ``patrickvonplaten/librispeech_asr_dummy`` to +speed up model evaluation. Model accuracy can be different from reported +in the paper. For reproducing original accuracy, use ``librispeech_asr`` +dataset. + +.. 
code:: ipython2 + + from datasets import load_dataset + + + dataset = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") + test_sample = dataset[0]["audio"] + + + # define preprocessing function for converting audio to input values for model + def map_to_input(batch): + preprocessed_signal = processor(batch["audio"]["array"], return_tensors="pt", padding="longest", sampling_rate=batch['audio']['sampling_rate']) + input_values = preprocessed_signal.input_values + batch['input_values'] = input_values + return batch + + + # apply preprocessing function to dataset and remove audio column, to save memory as we do not need it anymore + dataset = dataset.map(map_to_input, batched=False, remove_columns=["audio"]) + +Prepare calibration dataset `⇑ <#top>`__ +############################################################################################################################### + +.. code:: ipython2 + + import nncf + + + def transform_fn(data_item): + """ + Extract the model's input from the data item. + The data item here is the data item that is returned from the data source per iteration. + This function should be passed when the data item cannot be used as model's input. + """ + return np.array(data_item["input_values"]) + + + calibration_dataset = nncf.Dataset(dataset, transform_fn) + +Prepare validation function `⇑ <#top>`__ +############################################################################################################################### + +Define the validation function. + +.. code:: ipython2 + + from torchmetrics import WordErrorRate + from tqdm.notebook import tqdm + + + def validation_fn(model, dataset): + """ + Calculate and returns a metric for the model. + """ + wer = WordErrorRate() + for sample in tqdm(dataset): + # run infer function on sample + output = model.output(0) + logits = model(np.array(sample['input_values']))[output] + predicted_ids = np.argmax(logits, axis=-1) + transcription = processor.batch_decode(torch.from_numpy(predicted_ids)) + + # update metric on sample result + wer.update(transcription, [sample['text']]) + + result = wer.compute() + + return 1 - result + +Run quantization with accuracy control `⇑ <#top>`__ +############################################################################################################################### + +You should provide +the calibration dataset and the validation dataset. It can be the same +dataset. - parameter ``max_drop`` defines the accuracy drop threshold. +The quantization process stops when the degradation of accuracy metric +on the validation dataset is less than the ``max_drop``. The default +value is 0.01. NNCF will stop the quantization and report an error if +the ``max_drop`` value can’t be reached. - ``drop_type`` defines how the +accuracy drop will be calculated: ABSOLUTE (used by default) or +RELATIVE. - ``ranking_subset_size`` - size of a subset that is used to +rank layers by their contribution to the accuracy drop. Default value is +300, and the more samples it has the better ranking, potentially. Here +we use the value 25 to speed up the execution. + +.. note:: + + Execution can take tens of minutes and requires up to 10 GB + of free memory + + +.. 
code:: ipython2 + + from nncf.quantization.advanced_parameters import AdvancedAccuracyRestorerParameters + from nncf.parameters import ModelType + + quantized_model = nncf.quantize_with_accuracy_control( + ov_model, + calibration_dataset=calibration_dataset, + validation_dataset=calibration_dataset, + validation_fn=validation_fn, + max_drop=0.01, + drop_type=nncf.DropType.ABSOLUTE, + model_type=ModelType.TRANSFORMER, + advanced_accuracy_restorer_parameters=AdvancedAccuracyRestorerParameters( + ranking_subset_size=25 + ), + ) + +Model Usage Example `⇑ <#top>`__ +############################################################################################################################### + +.. code:: ipython2 + + import IPython.display as ipd + + + ipd.Audio(test_sample["array"], rate=16000) + +.. code:: ipython2 + + core = openvino.Core() + + compiled_quantized_model = core.compile_model(model=quantized_model, device_name='CPU') + + input_data = np.expand_dims(test_sample["array"], axis=0) + +Next, make a prediction. + +.. code:: ipython2 + + predictions = compiled_quantized_model([input_data])[0] + predicted_ids = np.argmax(predictions, axis=-1) + transcription = processor.batch_decode(torch.from_numpy(predicted_ids)) + transcription + +Compare Accuracy of the Original and Quantized Models `⇑ <#top>`__ +############################################################################################################################### + +- Define dataloader for test dataset. +- Define functions to get inference for PyTorch and OpenVINO models. +- Define functions to compute Word Error Rate. + +.. code:: ipython2 + + # inference function for pytorch + def torch_infer(model, sample): + logits = model(torch.Tensor(sample['input_values'])).logits + # take argmax and decode + predicted_ids = torch.argmax(logits, dim=-1) + transcription = processor.batch_decode(predicted_ids) + return transcription + + + # inference function for openvino + def ov_infer(model, sample): + output = model.output(0) + logits = model(np.array(sample['input_values']))[output] + predicted_ids = np.argmax(logits, axis=-1) + transcription = processor.batch_decode(torch.from_numpy(predicted_ids)) + return transcription + + + def compute_wer(dataset, model, infer_fn): + wer = WordErrorRate() + for sample in tqdm(dataset): + # run infer function on sample + transcription = infer_fn(model, sample) + # update metric on sample result + wer.update(transcription, [sample['text']]) + # finalize metric calculation + result = wer.compute() + return result + +Now, compute WER for the original PyTorch model and quantized model. + +.. 
code:: ipython2 + + pt_result = compute_wer(dataset, torch_model, torch_infer) + quantized_result = compute_wer(dataset, compiled_quantized_model, ov_infer) + + print(f'[PyTorch] Word Error Rate: {pt_result:.4f}') + print(f'[Quantized OpenVino] Word Error Rate: {quantized_result:.4f}') diff --git a/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst b/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst new file mode 100644 index 00000000000000..7bba4ef46f0c73 --- /dev/null +++ b/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst @@ -0,0 +1,306 @@ +Convert and Optimize YOLOv8 with OpenVINO™ +========================================== + + + +The YOLOv8 algorithm developed by Ultralytics is a cutting-edge, +state-of-the-art (SOTA) model that is designed to be fast, accurate, and +easy to use, making it an excellent choice for a wide range of object +detection, image segmentation, and image classification tasks. More +details about its realization can be found in the original model +`repository `__. + +This tutorial demonstrates step-by-step instructions on how to run apply +quantization with accuracy control to PyTorch YOLOv8. The advanced +quantization flow allows to apply 8-bit quantization to the model with +control of accuracy metric. This is achieved by keeping the most +impactful operations within the model in the original precision. The +flow is based on the `Basic 8-bit +quantization `__ +and has the following differences: + +- Besides the calibration dataset, a validation dataset is required to + compute the accuracy metric. Both datasets can refer to the same data + in the simplest case. +- Validation function, used to compute accuracy metric is required. It + can be a function that is already available in the source framework + or a custom function. +- Since accuracy validation is run several times during the + quantization process, quantization with accuracy control can take + more time than the Basic 8-bit quantization flow. +- The resulted model can provide smaller performance improvement than + the Basic 8-bit quantization flow because some of the operations are + kept in the original precision. + +.. note:: + + Currently, 8-bit quantization with accuracy control in NNCF + is available only for models in OpenVINO representation. + +The steps for the quantization with accuracy control are described +below. + +The tutorial consists of the following steps: + + + +- `Prerequisites <#prerequisites>`__ +- `Get Pytorch model and OpenVINO IR model <#get-pytorch-model-and-openvino-ir-model>`__ +- `Define validator and data loader <#define-validator-and-data-loader>`__ +- `Prepare calibration and validation datasets <#prepare-calibration-and-validation-datasets>`__ +- `Prepare validation function <#prepare-validation-function>`__ +- `Run quantization with accuracy control <#run-quantization-with-accuracy-control>`__ +- `Compare Accuracy and Performance of the Original and Quantized Models <#compare-accuracy-and-performance-of-the-original-and-quantized-models>`__ + +Prerequisites `⇑ <#top>`__ +############################################################################################################################### + + +Install necessary packages. + +.. 
code:: ipython2 + + !pip install -q "openvino==2023.1.0.dev20230811" + !pip install git+https://github.com/openvinotoolkit/nncf.git@develop + !pip install -q "ultralytics==8.0.43" + +Get Pytorch model and OpenVINO IR model `⇑ <#top>`__ +############################################################################################################################### + +Generally, PyTorch models represent an instance of the +`torch.nn.Module `__ +class, initialized by a state dictionary with model weights. We will use +the YOLOv8 nano model (also known as ``yolov8n``) pre-trained on a COCO +dataset, which is available in this +`repo `__. Similar steps are +also applicable to other YOLOv8 models. Typical steps to obtain a +pre-trained model: + +1. Create an instance of a model class. +2. Load a checkpoint state dict, which contains the pre-trained model + weights. + +In this case, the creators of the model provide an API that enables +converting the YOLOv8 model to ONNX and then to OpenVINO IR. Therefore, +we do not need to do these steps manually. + +.. code:: ipython2 + + import os + from pathlib import Path + + from ultralytics import YOLO + from ultralytics.yolo.cfg import get_cfg + from ultralytics.yolo.data.utils import check_det_dataset + from ultralytics.yolo.engine.validator import BaseValidator as Validator + from ultralytics.yolo.utils import DATASETS_DIR + from ultralytics.yolo.utils import DEFAULT_CFG + from ultralytics.yolo.utils import ops + from ultralytics.yolo.utils.metrics import ConfusionMatrix + + ROOT = os.path.abspath('') + + MODEL_NAME = "yolov8n-seg" + + model = YOLO(f"{ROOT}/{MODEL_NAME}.pt") + args = get_cfg(cfg=DEFAULT_CFG) + args.data = "coco128-seg.yaml" + +Load model. + +.. code:: ipython2 + + import openvino + + + model_path = Path(f"{ROOT}/{MODEL_NAME}_openvino_model/{MODEL_NAME}.xml") + if not model_path.exists(): + model.export(format="openvino", dynamic=True, half=False) + + ov_model = openvino.Core().read_model(model_path) + +Define validator and data loader `⇑ <#top>`__ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The original model +repository uses a ``Validator`` wrapper, which represents the accuracy +validation pipeline. It creates dataloader and evaluation metrics and +updates metrics on each data batch produced by the dataloader. Besides +that, it is responsible for data preprocessing and results +postprocessing. For class initialization, the configuration should be +provided. We will use the default setup, but it can be replaced with +some parameters overriding to test on custom data. The model has +connected the ``ValidatorClass`` method, which creates a validator class +instance. + +.. code:: ipython2 + + validator = model.ValidatorClass(args) + validator.data = check_det_dataset(args.data) + data_loader = validator.get_dataloader(f"{DATASETS_DIR}/coco128-seg", 1) + + validator.is_coco = True + validator.class_map = ops.coco80_to_coco91_class() + validator.names = model.model.names + validator.metrics.names = validator.names + validator.nc = model.model.model[-1].nc + validator.nm = 32 + validator.process = ops.process_mask + validator.plot_masks = [] + +Prepare calibration and validation datasets `⇑ <#top>`__ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +We can use one dataset as calibration and validation datasets. Name it +``quantization_dataset``. + +.. 
code:: ipython2 + + from typing import Dict + + import nncf + + + def transform_fn(data_item: Dict): + input_tensor = validator.preprocess(data_item)["img"].numpy() + return input_tensor + + + quantization_dataset = nncf.Dataset(data_loader, transform_fn) + +Prepare validation function `⇑ <#top>`__ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. code:: ipython2 + + from functools import partial + + import torch + from nncf.quantization.advanced_parameters import AdvancedAccuracyRestorerParameters + + + def validation_ac( + compiled_model: openvino.CompiledModel, + validation_loader: torch.utils.data.DataLoader, + validator: Validator, + num_samples: int = None, + ) -> float: + validator.seen = 0 + validator.jdict = [] + validator.stats = [] + validator.batch_i = 1 + validator.confusion_matrix = ConfusionMatrix(nc=validator.nc) + num_outputs = len(compiled_model.outputs) + + counter = 0 + for batch_i, batch in enumerate(validation_loader): + if num_samples is not None and batch_i == num_samples: + break + batch = validator.preprocess(batch) + results = compiled_model(batch["img"]) + if num_outputs == 1: + preds = torch.from_numpy(results[compiled_model.output(0)]) + else: + preds = [ + torch.from_numpy(results[compiled_model.output(0)]), + torch.from_numpy(results[compiled_model.output(1)]), + ] + preds = validator.postprocess(preds) + validator.update_metrics(preds, batch) + counter += 1 + stats = validator.get_stats() + if num_outputs == 1: + stats_metrics = stats["metrics/mAP50-95(B)"] + else: + stats_metrics = stats["metrics/mAP50-95(M)"] + print(f"Validate: dataset length = {counter}, metric value = {stats_metrics:.3f}") + + return stats_metrics + + + validation_fn = partial(validation_ac, validator=validator) + +Run quantization with accuracy control `⇑ <#top>`__ +############################################################################################################################### + +You should provide +the calibration dataset and the validation dataset. It can be the same +dataset. - parameter ``max_drop`` defines the accuracy drop threshold. +The quantization process stops when the degradation of accuracy metric +on the validation dataset is less than the ``max_drop``. The default +value is 0.01. NNCF will stop the quantization and report an error if +the ``max_drop`` value can’t be reached. - ``drop_type`` defines how the +accuracy drop will be calculated: ABSOLUTE (used by default) or +RELATIVE. - ``ranking_subset_size`` - size of a subset that is used to +rank layers by their contribution to the accuracy drop. Default value is +300, and the more samples it has the better ranking, potentially. Here +we use the value 25 to speed up the execution. + +.. note:: + + Execution can take tens of minutes and requires up to 15 GB + of free memory + +.. 
code:: ipython2 + + quantized_model = nncf.quantize_with_accuracy_control( + ov_model, + quantization_dataset, + quantization_dataset, + validation_fn=validation_fn, + max_drop=0.01, + preset=nncf.QuantizationPreset.MIXED, + advanced_accuracy_restorer_parameters=AdvancedAccuracyRestorerParameters( + ranking_subset_size=25, + num_ranking_processes=1 + ), + ) + +Compare Accuracy and Performance of the Original and Quantized Models `⇑ <#top>`__ +############################################################################################################################### + + +Now we can compare metrics of the Original non-quantized +OpenVINO IR model and Quantized OpenVINO IR model to make sure that the +``max_drop`` is not exceeded. + +.. code:: ipython2 + + import openvino + + core = openvino.Core() + quantized_compiled_model = core.compile_model(model=quantized_model, device_name='CPU') + compiled_ov_model = core.compile_model(model=ov_model, device_name='CPU') + + pt_result = validation_ac(compiled_ov_model, data_loader, validator) + quantized_result = validation_ac(quantized_compiled_model, data_loader, validator) + + + print(f'[Original OpenVino]: {pt_result:.4f}') + print(f'[Quantized OpenVino]: {quantized_result:.4f}') + +And compare performance. + +.. code:: ipython2 + + from pathlib import Path + # Set model directory + MODEL_DIR = Path("model") + MODEL_DIR.mkdir(exist_ok=True) + + ir_model_path = MODEL_DIR / 'ir_model.xml' + quantized_model_path = MODEL_DIR / 'quantized_model.xml' + + # Save models to use them in the commandline banchmark app + openvino.save_model(ov_model, ir_model_path, compress_to_fp16=False) + openvino.save_model(quantized_model, quantized_model_path, compress_to_fp16=False) + +.. code:: ipython2 + + # Inference Original model (OpenVINO IR) + ! benchmark_app -m $ir_model_path -shape "[1,3,640,640]" -d CPU -api async + +.. code:: ipython2 + + # Inference Quantized model (OpenVINO IR) + ! benchmark_app -m $quantized_model_path -shape "[1,3,640,640]" -d CPU -api async diff --git a/docs/notebooks/201-vision-monodepth-with-output.rst b/docs/notebooks/201-vision-monodepth-with-output.rst index 06ec0e5cd771bb..e98e4c37d8fdac 100644 --- a/docs/notebooks/201-vision-monodepth-with-output.rst +++ b/docs/notebooks/201-vision-monodepth-with-output.rst @@ -1,7 +1,7 @@ Monodepth Estimation with OpenVINO ================================== -.. _top: + This tutorial demonstrates Monocular Depth Estimation with MidasNet in OpenVINO. Model information can be found @@ -30,6 +30,8 @@ Transfer,” `__ in IEEE Transactions on Pattern Analysis and Machine Intelligence, doi: ``10.1109/TPAMI.2020.3019967``. +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/202-vision-superresolution-image-with-output.rst b/docs/notebooks/202-vision-superresolution-image-with-output.rst index 18ea80db89dbd5..2a9c26e53422db 100644 --- a/docs/notebooks/202-vision-superresolution-image-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-image-with-output.rst @@ -1,7 +1,7 @@ Single Image Super Resolution with OpenVINO™ ============================================ -.. _top: + Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook shows the @@ -16,6 +16,8 @@ Resolution,” `__ 2018 24th International Conference on Pattern Recognition (ICPR), 2018, pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760. +.. 
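The ``benchmark_app`` runs above give a full throughput report for the YOLOv8
IRs saved in the ``model`` directory. For a quick, rough latency check from
Python, a minimal sketch along the following lines could also be used (single
synchronous requests on CPU; the numbers will differ from ``benchmark_app``):

.. code:: python

    import time

    import numpy as np
    import openvino

    core = openvino.Core()


    def average_latency_ms(xml_path: str, n_iter: int = 50) -> float:
        # Compile the IR for CPU and time synchronous inference on a dummy input.
        compiled = core.compile_model(core.read_model(xml_path), "CPU")
        request = compiled.create_infer_request()
        dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
        start = time.perf_counter()
        for _ in range(n_iter):
            request.infer({0: dummy})
        return (time.perf_counter() - start) / n_iter * 1000


    print(f"FP32 IR: {average_latency_ms('model/ir_model.xml'):.2f} ms")
    print(f"INT8 IR: {average_latency_ms('model/quantized_model.xml'):.2f} ms")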
_top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/202-vision-superresolution-video-with-output.rst b/docs/notebooks/202-vision-superresolution-video-with-output.rst index 840d31c84ee1fc..7b48a8c64eddaa 100644 --- a/docs/notebooks/202-vision-superresolution-video-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-video-with-output.rst @@ -1,7 +1,7 @@ Video Super Resolution with OpenVINO™ ===================================== -.. _top: + Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies @@ -23,6 +23,8 @@ pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760. video. +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/203-meter-reader-with-output.rst b/docs/notebooks/203-meter-reader-with-output.rst index e45a6d9973c76f..eeec4746977f14 100644 --- a/docs/notebooks/203-meter-reader-with-output.rst +++ b/docs/notebooks/203-meter-reader-with-output.rst @@ -1,7 +1,7 @@ Industrial Meter Reader ======================= -.. _top: + This notebook shows how to create a industrial meter reader with OpenVINO Runtime. We use the pre-trained @@ -21,6 +21,8 @@ to build up a multiple inference task pipeline: workflow +.. _top: + **Table of contents**: - `Import <#import>`__ diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst index c516000c84ca82..29f412a4194065 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst @@ -1,7 +1,7 @@ Semantic Segmentation with OpenVINO™ using Segmenter ==================================================== -.. _top: + Semantic segmentation is a difficult computer vision problem with many applications such as autonomous driving, robotics, augmented reality, @@ -28,6 +28,8 @@ paper: `Segmenter: Transformer for Semantic Segmentation `__ or in the `repository `__. +.. _top: + **Table of contents**: - `Get and prepare PyTorch model <#get-and-prepare-pytorch-model>`__ diff --git a/docs/notebooks/205-vision-background-removal-with-output.rst b/docs/notebooks/205-vision-background-removal-with-output.rst index cd53815c483fec..1c4ae2d1696ec4 100644 --- a/docs/notebooks/205-vision-background-removal-with-output.rst +++ b/docs/notebooks/205-vision-background-removal-with-output.rst @@ -1,7 +1,7 @@ Image Background Removal with U^2-Net and OpenVINO™ =================================================== -.. _top: + This notebook demonstrates background removal in images using U\ :math:`^2`-Net and OpenVINO. @@ -17,6 +17,8 @@ The model source is available `here `__. +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst index 7974ce25de12e8..32cafa0c20c30a 100644 --- a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst +++ b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst @@ -1,7 +1,7 @@ Photos to Anime with PaddleGAN and OpenVINO =========================================== -.. 
_top: + This tutorial demonstrates converting a `PaddlePaddle/PaddleGAN `__ @@ -16,6 +16,8 @@ documentation `__ diff --git a/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst b/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst index 5967a0bf7b199c..b19bfc982c628f 100644 --- a/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst +++ b/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst @@ -1,7 +1,7 @@ Super Resolution with PaddleGAN and OpenVINO™ ============================================= -.. _top: + This notebook demonstrates converting the RealSR (real-world super-resolution) model from @@ -18,6 +18,8 @@ from CVPR 2020. This notebook works best with small images (up to 800x600 resolution). +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/208-optical-character-recognition-with-output.rst b/docs/notebooks/208-optical-character-recognition-with-output.rst index 0815ae2d3cd700..871f7110dd1ef9 100644 --- a/docs/notebooks/208-optical-character-recognition-with-output.rst +++ b/docs/notebooks/208-optical-character-recognition-with-output.rst @@ -1,7 +1,7 @@ Optical Character Recognition (OCR) with OpenVINO™ ================================================== -.. _top: + This tutorial demonstrates how to perform optical character recognition (OCR) with OpenVINO models. It is a continuation of the @@ -21,6 +21,8 @@ Zoo `__. For more information, refer to the `104-model-tools <104-model-tools-with-output.html>`__ tutorial. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/209-handwritten-ocr-with-output.rst b/docs/notebooks/209-handwritten-ocr-with-output.rst index e0f5913988f5de..8aa26383d21a6c 100644 --- a/docs/notebooks/209-handwritten-ocr-with-output.rst +++ b/docs/notebooks/209-handwritten-ocr-with-output.rst @@ -1,7 +1,7 @@ Handwritten Chinese and Japanese OCR with OpenVINO™ =================================================== -.. _top: + In this tutorial, we perform optical character recognition (OCR) for handwritten Chinese (simplified) and Japanese. An OCR tutorial using the @@ -19,6 +19,8 @@ and `scut_ept `__ charlists are used. Both models are available on `Open Model Zoo `__. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/210-slowfast-video-recognition-with-output.rst b/docs/notebooks/210-slowfast-video-recognition-with-output.rst index e795d99a6ef5ee..c2bcfa25c5d064 100644 --- a/docs/notebooks/210-slowfast-video-recognition-with-output.rst +++ b/docs/notebooks/210-slowfast-video-recognition-with-output.rst @@ -1,7 +1,7 @@ Video Recognition using SlowFast and OpenVINO™ ============================================== -.. _top: + Teaching machines to detect, understand and analyze the contents of images has been one of the more well-known and well-studied problems in @@ -40,6 +40,8 @@ This tutorial consists of the following steps .. |image0| image:: https://user-images.githubusercontent.com/34324155/143044111-94676f64-7ba8-4081-9011-f8054bed7030.png +.. _top: + **Table of contents**: - `Prepare PyTorch Model <#prepare-pytorch-model>`__ diff --git a/docs/notebooks/211-speech-to-text-with-output.rst b/docs/notebooks/211-speech-to-text-with-output.rst index 080d8b092c9a09..95d919eb6d637f 100644 --- a/docs/notebooks/211-speech-to-text-with-output.rst +++ b/docs/notebooks/211-speech-to-text-with-output.rst @@ -1,7 +1,7 @@ Speech to Text with OpenVINO™ ============================= -.. 
_top: + This tutorial demonstrates speech-to-text recognition with OpenVINO. @@ -13,6 +13,8 @@ with Connectionist Temporal Classification (CTC) loss. The model is available from `Open Model Zoo `__. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/212-pyannote-speaker-diarization-with-output.rst b/docs/notebooks/212-pyannote-speaker-diarization-with-output.rst index 8fabfbf8b90a1e..2e8af021276050 100644 --- a/docs/notebooks/212-pyannote-speaker-diarization-with-output.rst +++ b/docs/notebooks/212-pyannote-speaker-diarization-with-output.rst @@ -1,7 +1,7 @@ Speaker diarization =================== -.. _top: + Speaker diarization is the process of partitioning an audio stream containing human speech into homogeneous segments according to the @@ -39,6 +39,8 @@ card `__, `repo `__ and `paper `__. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/213-question-answering-with-output.rst b/docs/notebooks/213-question-answering-with-output.rst index e3fc0ee6c8d144..9b1be824b7a9a9 100644 --- a/docs/notebooks/213-question-answering-with-output.rst +++ b/docs/notebooks/213-question-answering-with-output.rst @@ -1,7 +1,7 @@ Interactive question answering with OpenVINO™ ============================================= -.. _top: + This demo shows interactive question answering with OpenVINO, using `small BERT-large-like @@ -11,6 +11,8 @@ larger BERT-large model. The model comes from `Open Model Zoo `__. Final part of this notebook provides live inference results from your inputs. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/214-grammar-correction-with-output.rst b/docs/notebooks/214-grammar-correction-with-output.rst index eaff3b6e620411..434aabbacd3490 100644 --- a/docs/notebooks/214-grammar-correction-with-output.rst +++ b/docs/notebooks/214-grammar-correction-with-output.rst @@ -1,7 +1,7 @@ Grammatical Error Correction with OpenVINO ========================================== -.. _top: + AI-based auto-correction products are becoming increasingly popular due to their ease of use, editing speed, and affordability. These products @@ -43,6 +43,8 @@ It consists of the following steps: Optimum `__. - Create an inference pipeline for grammatical error checking +.. _top: + **Table of contents**: - `How does it work? <#how-does-it-work>`__ diff --git a/docs/notebooks/215-image-inpainting-with-output.rst b/docs/notebooks/215-image-inpainting-with-output.rst index 85f762359ab612..f9ecfbafeeb006 100644 --- a/docs/notebooks/215-image-inpainting-with-output.rst +++ b/docs/notebooks/215-image-inpainting-with-output.rst @@ -1,7 +1,7 @@ Image In-painting with OpenVINO™ -------------------------------- -.. _top: + This notebook demonstrates how to use an image in-painting model with OpenVINO, using `GMCNN @@ -11,6 +11,8 @@ given a tampered image, is able to create something very similar to the original image. The Following pipeline will be used in this notebook. |pipeline| +.. _top: + **Table of contents**: - `Download the Model <#download-the-model>`__ diff --git a/docs/notebooks/216-attention-center-with-output.rst b/docs/notebooks/216-attention-center-with-output.rst index 07e5c69eedb58b..2a5dcfc7c8aba2 100644 --- a/docs/notebooks/216-attention-center-with-output.rst +++ b/docs/notebooks/216-attention-center-with-output.rst @@ -1,7 +1,7 @@ The attention center model with OpenVINO™ ========================================= -.. 
_top: + This notebook demonstrates how to use the `attention center model `__ with @@ -51,6 +51,8 @@ The attention center model has been trained with images from the `COCO dataset `__ annotated with saliency from the `SALICON dataset `__. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/217-vision-deblur-with-output.rst b/docs/notebooks/217-vision-deblur-with-output.rst index 3686de8db5f5dc..6e0f7067823a60 100644 --- a/docs/notebooks/217-vision-deblur-with-output.rst +++ b/docs/notebooks/217-vision-deblur-with-output.rst @@ -1,6 +1,8 @@ Deblur Photos with DeblurGAN-v2 and OpenVINO™ ============================================= + + .. _top: **Table of contents**: diff --git a/docs/notebooks/218-vehicle-detection-and-recognition-with-output.rst b/docs/notebooks/218-vehicle-detection-and-recognition-with-output.rst index c5237117f8a960..2bc8a6cd2e9d94 100644 --- a/docs/notebooks/218-vehicle-detection-and-recognition-with-output.rst +++ b/docs/notebooks/218-vehicle-detection-and-recognition-with-output.rst @@ -1,7 +1,7 @@ Vehicle Detection And Recognition with OpenVINO™ ================================================ -.. _top: + This tutorial demonstrates how to use two pre-trained models from `Open Model Zoo `__: @@ -19,6 +19,8 @@ As a result, you can get: result +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst index c623c3cfd0018e..07fd9413bcaf79 100644 --- a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst +++ b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst @@ -1,7 +1,7 @@ OpenVINO optimizations for Knowledge graphs =========================================== -.. _top: + The goal of this notebook is to showcase performance optimizations for the ConvE knowledge graph embeddings model using the Intel® Distribution @@ -18,6 +18,8 @@ The ConvE model is an implementation of the paper - sample dataset can be downloaded from: https://github.com/TimDettmers/ConvE/tree/master/countries/countries_S1 +.. _top: + **Table of contents**: - `Windows specific settings <#windows-specific-settings>`__ diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst index cd34355ccf9d6f..88d0874160f97a 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst @@ -1,7 +1,7 @@ Cross-lingual Books Alignment with Transformers and OpenVINO™ ============================================================= -.. _top: + Cross-lingual text alignment is the task of matching sentences in a pair of texts that are translations of each other. In this notebook, you’ll @@ -39,6 +39,8 @@ Prerequisites - ``seaborn`` - for alignment matrix visualization - ``ipywidgets`` - for displaying HTML and JS output in the notebook +.. _top: + **Table of contents**: - `Get Books <#get-books>`__ diff --git a/docs/notebooks/221-machine-translation-with-output.rst b/docs/notebooks/221-machine-translation-with-output.rst index f8c36d8b482fb4..b4103a43f252bd 100644 --- a/docs/notebooks/221-machine-translation-with-output.rst +++ b/docs/notebooks/221-machine-translation-with-output.rst @@ -1,7 +1,7 @@ Machine translation demo ======================== -.. _top: + This demo utilizes Intel’s pre-trained model that translates from English to German. 
More information about the model can be found @@ -18,6 +18,8 @@ following structure: ```` + *tokenized sentence* + ```` + **Output** After the inference, we have a sequence of up to 200 tokens. The structure is the same as the one for the input. +.. _top: + **Table of contents**: - `Downloading model <#downloading-model>`__ diff --git a/docs/notebooks/222-vision-image-colorization-with-output.rst b/docs/notebooks/222-vision-image-colorization-with-output.rst index 5985afd3fedb0f..5d3d32c0655969 100644 --- a/docs/notebooks/222-vision-image-colorization-with-output.rst +++ b/docs/notebooks/222-vision-image-colorization-with-output.rst @@ -1,7 +1,7 @@ Image Colorization with OpenVINO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _top: + This notebook demonstrates how to colorize images with OpenVINO using the Colorization model @@ -44,6 +44,8 @@ About Colorization-siggraph See the `colorization `__ repository for more details. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/223-text-prediction-with-output.rst b/docs/notebooks/223-text-prediction-with-output.rst index ef77dd1d3e04f1..eeb9f79f0f2097 100644 --- a/docs/notebooks/223-text-prediction-with-output.rst +++ b/docs/notebooks/223-text-prediction-with-output.rst @@ -1,7 +1,7 @@ Text Prediction with OpenVINO™ ============================== -.. _top: + This notebook shows text prediction with OpenVINO. This notebook can work in two different modes, Text Generation and Conversation, which the @@ -73,6 +73,8 @@ above. The Generated response is added to the history with the and the sequence is passed back into the model. +.. _top: + **Table of contents**: - `Model Selection <#model-selection>`__ diff --git a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst index fef333d4d1c16f..8934a54ec2e574 100644 --- a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst +++ b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst @@ -1,7 +1,7 @@ Part Segmentation of 3D Point Clouds with OpenVINO™ =================================================== -.. _top: + This notebook demonstrates how to process `point cloud `__ data and run 3D @@ -24,6 +24,8 @@ segmentation, to scene semantic parsing. It is highly efficient and effective, showing strong performance on par or even better than state of the art. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/225-stable-diffusion-text-to-image-with-output.rst b/docs/notebooks/225-stable-diffusion-text-to-image-with-output.rst index 90f6243f6c3dda..255e3b6b2a51cc 100644 --- a/docs/notebooks/225-stable-diffusion-text-to-image-with-output.rst +++ b/docs/notebooks/225-stable-diffusion-text-to-image-with-output.rst @@ -1,7 +1,7 @@ Text-to-Image Generation with Stable Diffusion and OpenVINO™ ============================================================ -.. _top: + Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from @@ -41,6 +41,8 @@ Notebook contains the following steps: API. 3. Run Stable Diffusion pipeline with OpenVINO. +.. 
_top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/226-yolov7-optimization-with-output.rst b/docs/notebooks/226-yolov7-optimization-with-output.rst index 330d988cab3802..5867c26429a728 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output.rst +++ b/docs/notebooks/226-yolov7-optimization-with-output.rst @@ -1,7 +1,7 @@ Convert and Optimize YOLOv7 with OpenVINO™ ========================================== -.. _top: + The YOLOv7 algorithm is making big waves in the computer vision and machine learning communities. It is a real-time object detection @@ -40,6 +40,8 @@ The tutorial consists of the following steps: - Compare accuracy of the FP32 and quantized models. - Compare performance of the FP32 and quantized models. +.. _top: + **Table of contents**: - `Get Pytorch model <#get-pytorch-model>`__ diff --git a/docs/notebooks/227-whisper-subtitles-generation-with-output.rst b/docs/notebooks/227-whisper-subtitles-generation-with-output.rst index 05b04c2fec8dd3..39d210defad500 100644 --- a/docs/notebooks/227-whisper-subtitles-generation-with-output.rst +++ b/docs/notebooks/227-whisper-subtitles-generation-with-output.rst @@ -1,7 +1,7 @@ Video Subtitle Generation using Whisper and OpenVINO™ ===================================================== -.. _top: + `Whisper `__ is an automatic speech recognition (ASR) system trained on 680,000 hours of multilingual and @@ -26,6 +26,8 @@ Download the model. 2. Instantiate the PyTorch model pipeline. 3. Export the ONNX model and convert it to OpenVINO IR, using model conversion API. 4. Run the Whisper pipeline with OpenVINO models. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/228-clip-zero-shot-convert-with-output.rst b/docs/notebooks/228-clip-zero-shot-convert-with-output.rst index 913817a8a4e34a..63f70768c20f1a 100644 --- a/docs/notebooks/228-clip-zero-shot-convert-with-output.rst +++ b/docs/notebooks/228-clip-zero-shot-convert-with-output.rst @@ -1,7 +1,7 @@ Zero-shot Image Classification with OpenAI CLIP and OpenVINO™ ============================================================= -.. _top: + Zero-shot image classification is a computer vision task to classify images into one of several classes without any prior training or @@ -30,6 +30,8 @@ image classification. The notebook contains the following steps: conversion API. 4. Run CLIP with OpenVINO. +.. _top: + **Table of contents**: - `Instantiate model <#instantiate-model>`__ diff --git a/docs/notebooks/228-clip-zero-shot-quantize-with-output.rst b/docs/notebooks/228-clip-zero-shot-quantize-with-output.rst index f6c2d4fb2f0bdf..1e335a73b2fd53 100644 --- a/docs/notebooks/228-clip-zero-shot-quantize-with-output.rst +++ b/docs/notebooks/228-clip-zero-shot-quantize-with-output.rst @@ -1,7 +1,7 @@ Post-Training Quantization of OpenAI CLIP model with NNCF ========================================================= -.. _top: + The goal of this tutorial is to demonstrate how to speed up the model by applying 8-bit post-training quantization from @@ -23,6 +23,8 @@ The optimization process contains the following steps: notebook first to generate OpenVINO IR model that is used for quantization. +.. 
_top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst index 514d49925a5025..018993b6f036f9 100644 --- a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst +++ b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst @@ -1,7 +1,7 @@ Sentiment Analysis with OpenVINO™ ================================= -.. _top: + **Sentiment analysis** is the use of natural language processing, text analysis, computational linguistics, and biometrics to systematically @@ -9,6 +9,8 @@ identify, extract, quantify, and study affective states and subjective information. This notebook demonstrates how to convert and run a sequence classification model using OpenVINO. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/230-yolov8-optimization-with-output.rst b/docs/notebooks/230-yolov8-optimization-with-output.rst index 28d3b14a05169e..f3083e063aa1a2 100644 --- a/docs/notebooks/230-yolov8-optimization-with-output.rst +++ b/docs/notebooks/230-yolov8-optimization-with-output.rst @@ -1,7 +1,7 @@ Convert and Optimize YOLOv8 with OpenVINO™ ========================================== -.. _top: + The YOLOv8 algorithm developed by Ultralytics is a cutting-edge, state-of-the-art (SOTA) model that is designed to be fast, accurate, and @@ -39,6 +39,8 @@ The tutorial consists of the following steps: - Compare performance of the FP32 and quantized models. - Compare accuracy of the FP32 and quantized models. +.. _top: + **Table of contents**: - `Get Pytorch model <#get-pytorch-model>`__ diff --git a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst index bf63a422e49bcf..308a358d1c51fc 100644 --- a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst +++ b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst @@ -1,7 +1,7 @@ Image Editing with InstructPix2Pix and OpenVINO =============================================== -.. _top: + The InstructPix2Pix is a conditional diffusion model that edits images based on written instructions provided by the user. Generative image @@ -31,6 +31,8 @@ Notebook contains the following steps: 3. Run InstructPix2Pix pipeline with OpenVINO. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/233-blip-visual-language-processing-with-output.rst b/docs/notebooks/233-blip-visual-language-processing-with-output.rst index 2637f314bf1d32..8468422b451f40 100644 --- a/docs/notebooks/233-blip-visual-language-processing-with-output.rst +++ b/docs/notebooks/233-blip-visual-language-processing-with-output.rst @@ -1,7 +1,7 @@ Visual Question Answering and Image Captioning using BLIP and OpenVINO ====================================================================== -.. _top: + Humans perceive the world through vision and language. A longtime goal of AI is to build intelligent agents that can understand the world @@ -24,6 +24,8 @@ The tutorial consists of the following parts: 2. Convert the BLIP model to OpenVINO IR. 3. Run visual question answering and image captioning with OpenVINO. +.. 
_top: + **Table of contents**: - `Background <#background>`__ diff --git a/docs/notebooks/234-encodec-audio-compression-with-output.rst b/docs/notebooks/234-encodec-audio-compression-with-output.rst index 309214879cdbde..7e98b009f940ba 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output.rst +++ b/docs/notebooks/234-encodec-audio-compression-with-output.rst @@ -1,7 +1,7 @@ Audio compression with EnCodec and OpenVINO =========================================== -.. _top: + Compression is an important part of the Internet today because it enables people to easily share high-quality photos, listen to audio @@ -28,6 +28,8 @@ and original `repo `__. image.png +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst index 1ce9e215d7697f..3ab1065358ffbc 100644 --- a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst +++ b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst @@ -1,7 +1,7 @@ Text-to-Image Generation with ControlNet Conditioning ===================================================== -.. _top: + Diffusion models make a revolution in AI-generated art. This technology enables creation of high-quality images simply by writing a text prompt. @@ -141,6 +141,8 @@ of the target in the image: This tutorial focuses mainly on conditioning by pose. However, the discussed steps are also applicable to other annotation modes. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst index 6916ae2fd5f239..4a1e447144f312 100644 --- a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst @@ -1,7 +1,7 @@ Infinite Zoom Stable Diffusion v2 and OpenVINO™ =============================================== -.. _top: + Stable Diffusion v2 is the next generation of Stable Diffusion model a Text-to-Image latent diffusion model created by the researchers and @@ -74,6 +74,8 @@ Notebook contains the following steps: 3. Run Stable Diffusion v2 inpainting pipeline for generation infinity zoom video +.. _top: + **Table of contents**: - `Stable Diffusion v2 Infinite Zoom Showcase <#stable-diffusion-v2-infinite-zoom-showcase>`__ diff --git a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst index 59df2505a79d6f..ff8f9a9350f7ad 100644 --- a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst @@ -1,10 +1,12 @@ Stable Diffusion v2.1 using Optimum-Intel OpenVINO and multiple Intel Hardware ============================================================================== -.. _top: + |image0| +.. 
_top: + **Table of contents**: - `Showing Info Available Devices <#showing-info-available-devices>`__ diff --git a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst index 59641538c131ce..f44eda207c3306 100644 --- a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst @@ -1,10 +1,12 @@ Stable Diffusion v2.1 using Optimum-Intel OpenVINO ================================================== -.. _top: + |image0| +.. _top: + **Table of contents**: - `Showing Info Available Devices <#showing-info-available-devices>`__ diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst index fc0468612224fe..7cd65143c0b083 100644 --- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst @@ -1,7 +1,7 @@ Stable Diffusion Text-to-Image Demo =================================== -.. _top: + Stable Diffusion is an innovative generative AI technique that allows us to generate and manipulate images in interesting ways, including @@ -26,6 +26,8 @@ promising results for selecting a wide range of input text prompts! `236-stable-diffusion-v2-text-to-image `__. +.. _top: + **Table of contents**: - `Step 0: Install and import prerequisites <#step-0-install-and-import-prerequisites>`__ diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst index 826dc04d7ee881..f8cb417e3cf49c 100644 --- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst @@ -1,7 +1,7 @@ Text-to-Image Generation with Stable Diffusion v2 and OpenVINO™ =============================================================== -.. _top: + Stable Diffusion v2 is the next generation of Stable Diffusion model a Text-to-Image latent diffusion model created by the researchers and @@ -81,6 +81,8 @@ Notebook contains the following steps: notebook `__. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst index 454adae0660af3..25969d47260999 100644 --- a/docs/notebooks/237-segment-anything-with-output.rst +++ b/docs/notebooks/237-segment-anything-with-output.rst @@ -1,6 +1,8 @@ Object masks from prompts with SAM and OpenVINO =============================================== + + .. _top: **Table of contents**: diff --git a/docs/notebooks/238-deep-floyd-if-with-output.rst b/docs/notebooks/238-deep-floyd-if-with-output.rst index 7585c074bad129..5701933a9efb3a 100644 --- a/docs/notebooks/238-deep-floyd-if-with-output.rst +++ b/docs/notebooks/238-deep-floyd-if-with-output.rst @@ -1,8 +1,6 @@ Image generation with DeepFloyd IF and OpenVINO™ ================================================ -.. _top: - DeepFloyd IF is an advanced open-source text-to-image model that delivers remarkable photorealism and language comprehension. DeepFloyd IF consists of a frozen text encoder and three cascaded pixel diffusion @@ -78,6 +76,10 @@ vector in embedded space. conventional Super Resolution network to get hi-res results. + + +.. 
_top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/239-image-bind-convert-with-output.rst b/docs/notebooks/239-image-bind-convert-with-output.rst index bc4a983a5a21ba..ffd69a13191468 100644 --- a/docs/notebooks/239-image-bind-convert-with-output.rst +++ b/docs/notebooks/239-image-bind-convert-with-output.rst @@ -1,7 +1,7 @@ Binding multimodal data using ImageBind and OpenVINO ==================================================== -.. _top: + Exploring the surrounding world, people get information using multiple senses, for example, seeing a busy street and hearing the sounds of car @@ -69,6 +69,8 @@ represented on the image below: In this tutorial, we consider how to use ImageBind for multimodal zero-shot classification. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst index bbc1e2401599b8..9b450eb9902ce5 100644 --- a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst +++ b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst @@ -1,7 +1,7 @@ Instruction following using Databricks Dolly 2.0 and OpenVINO ============================================================= -.. _top: + The instruction following is one of the cornerstones of the current generation of large language models(LLMs). Reinforcement learning with @@ -82,6 +82,8 @@ post `__ +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/241-riffusion-text-to-music-with-output.rst b/docs/notebooks/241-riffusion-text-to-music-with-output.rst index cae9b6e81d19d0..d8eb9cb1462095 100644 --- a/docs/notebooks/241-riffusion-text-to-music-with-output.rst +++ b/docs/notebooks/241-riffusion-text-to-music-with-output.rst @@ -1,7 +1,7 @@ Text-to-Music generation using Riffusion and OpenVINO ===================================================== -.. _top: + `Riffusion `__ is a latent text-to-image diffusion model capable of generating spectrogram @@ -76,6 +76,8 @@ The STFT is invertible, so the original audio can be reconstructed from a spectrogram. This idea is a behind approach to using Riffusion for audio generation. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/242-freevc-voice-conversion-with-output.rst b/docs/notebooks/242-freevc-voice-conversion-with-output.rst index 5fcb41ebaf590d..1c39257b4a76f0 100644 --- a/docs/notebooks/242-freevc-voice-conversion-with-output.rst +++ b/docs/notebooks/242-freevc-voice-conversion-with-output.rst @@ -1,7 +1,7 @@ High-Quality Text-Free One-Shot Voice Conversion with FreeVC and OpenVINO™ ========================================================================== -.. _top: + `FreeVC `__ allows alter the voice of a source speaker to a target style, while keeping the linguistic content @@ -30,6 +30,8 @@ devices. It consists of the following steps: - Convert models to OpenVINO Intermediate Representation. - Inference using only OpenVINO’s IR models. +.. 
_top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst index 69a2c1eecdde0b..c709cd516e92e6 100644 --- a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst +++ b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst @@ -1,7 +1,7 @@ Selfie Segmentation using TFLite and OpenVINO ============================================= -.. _top: + The Selfie segmentation pipeline allows developers to easily separate the background from users within a scene and focus on what matters. @@ -36,6 +36,8 @@ The tutorial consists of following steps: 2. Run inference on the image. 3. Run interactive background blurring demo on video. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/244-named-entity-recognition-with-output.rst b/docs/notebooks/244-named-entity-recognition-with-output.rst index 40dcb1455d73b3..dd6af58fd7bc13 100644 --- a/docs/notebooks/244-named-entity-recognition-with-output.rst +++ b/docs/notebooks/244-named-entity-recognition-with-output.rst @@ -1,7 +1,7 @@ Named entity recognition with OpenVINO™ ======================================= -.. _top: + The Named Entity Recognition(NER) is a natural language processing method that involves the detecting of key information in the @@ -27,6 +27,8 @@ To simplify the user experience, the `Hugging Face Optimum `__ library is used to convert the model to OpenVINO™ IR format and quantize it. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/248-stable-diffusion-xl-with-output.rst b/docs/notebooks/248-stable-diffusion-xl-with-output.rst index 457c66ce5399b9..594fb4f1a7b6e7 100644 --- a/docs/notebooks/248-stable-diffusion-xl-with-output.rst +++ b/docs/notebooks/248-stable-diffusion-xl-with-output.rst @@ -1,7 +1,7 @@ Image generation with Stable Diffusion XL and OpenVINO ====================================================== -.. _top: + Stable Diffusion XL or SDXL is the latest image generation model that is tailored towards more photorealistic outputs with more detailed imagery @@ -67,6 +67,8 @@ The tutorial consists of the following steps: Some demonstrated models can require at least 64GB RAM for conversion and running. +.. _top: + **Table of contents**: - `Install Prerequisites <#install-prerequisites>`__ diff --git a/docs/notebooks/250-music-generation-with-output.rst b/docs/notebooks/250-music-generation-with-output.rst index 1339c538e7e30c..733e303c35fa9e 100644 --- a/docs/notebooks/250-music-generation-with-output.rst +++ b/docs/notebooks/250-music-generation-with-output.rst @@ -1,7 +1,7 @@ Controllable Music Generation with MusicGen and OpenVINO ======================================================== -.. _top: + MusicGen is a single-stage auto-regressive Transformer model capable of generating high-quality music samples conditioned on text descriptions @@ -32,6 +32,8 @@ We will use a model implementation from the `Hugging Face Transformers `__ library. +.. 
_top: + **Table of contents**: - `Requirements and Imports <#prerequisites>`__ diff --git a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst index f8043dfe5526bb..b2afd5f5c58864 100644 --- a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst +++ b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst @@ -1,7 +1,7 @@ Image Generation with Tiny-SD and OpenVINO™ =========================================== -.. _top: + In recent times, the AI community has witnessed a remarkable surge in the development of larger and more performant language models, such as @@ -41,7 +41,9 @@ The notebook contains the following steps: 3. Run Inference pipeline with OpenVINO. 4. Run Interactive demo for Tiny-SD model -**Table of content**: +.. _toc: + +**Table of contents**: - `Prerequisites <#prerequisites>`__ - `Create PyTorch Models pipeline <#create-pytorch-models-pipeline>`__ diff --git a/docs/notebooks/252-fastcomposer-image-generation-with-output.rst b/docs/notebooks/252-fastcomposer-image-generation-with-output.rst index 891e1dd364663c..d0c9a479aa0f06 100644 --- a/docs/notebooks/252-fastcomposer-image-generation-with-output.rst +++ b/docs/notebooks/252-fastcomposer-image-generation-with-output.rst @@ -1,7 +1,7 @@ `FastComposer: Tuning-Free Multi-Subject Image Generation with Localized Attention `__ ===================================================================================================================== -.. _top: + FastComposer uses subject embeddings extracted by an image encoder to augment the generic text conditioning in diffusion models, enabling @@ -32,6 +32,8 @@ different styles, actions, and contexts. drivers in the system - changes to have compatibility with transformers >= 4.30.1 (due to security vulnerability) +.. _top: + **Table of contents**: - `Install Prerequisites <#install-prerequisites>`__ diff --git a/docs/notebooks/253-zeroscope-text2video-with-output.rst b/docs/notebooks/253-zeroscope-text2video-with-output.rst index 4a538a6a8fc401..549a1ce04e5bfa 100644 --- a/docs/notebooks/253-zeroscope-text2video-with-output.rst +++ b/docs/notebooks/253-zeroscope-text2video-with-output.rst @@ -1,7 +1,7 @@ Video generation with ZeroScope and OpenVINO ============================================ -.. _top: + The ZeroScope model is a free and open-source text-to-video model that can generate realistic and engaging videos from text descriptions. It is @@ -34,6 +34,8 @@ Both versions of the ZeroScope model are available on Hugging Face: We will use the first one. +.. _top: + **Table of contents**: - `Install and import required packages <#install-and-import-required-packages>`__ diff --git a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst index 353297f180564d..6054fb8ae8c823 100644 --- a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst +++ b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst @@ -11,6 +11,8 @@ A custom dataloader and metric will be defined, and accuracy and performance will be computed for the original IR model and the quantized model. +.. 
_top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/301-tensorflow-training-openvino-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-with-output.rst index 53b511021f88dc..0b02ba0ee4f272 100644 --- a/docs/notebooks/301-tensorflow-training-openvino-with-output.rst +++ b/docs/notebooks/301-tensorflow-training-openvino-with-output.rst @@ -1,6 +1,8 @@ From Training to Deployment with TensorFlow and OpenVINO™ ========================================================= + + .. _top: **Table of contents**: diff --git a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst index 766537b933d2df..3cc99a837ea6bb 100644 --- a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst +++ b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst @@ -1,7 +1,7 @@ Quantization Aware Training with NNCF, using PyTorch framework ============================================================== -.. _top: + This notebook is based on `ImageNet training in PyTorch `__. @@ -34,6 +34,8 @@ hub `__. This notebook requires a C++ compiler. +.. _top: + **Table of contents**: - `Imports and Settings <#imports-and-settings>`__ diff --git a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst index b4673eb4c3e4bf..8f0ad9a7f72357 100644 --- a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst +++ b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst @@ -1,7 +1,7 @@ Quantization Aware Training with NNCF, using TensorFlow Framework ================================================================= -.. _top: + The goal of this notebook to demonstrate how to use the Neural Network Compression Framework `NNCF `__ @@ -23,6 +23,8 @@ Imagenette is a subset of 10 easily classified classes from the ImageNet dataset. Using the smaller model and dataset will speed up training and download time. +.. _top: + **Table of contents**: - `Imports and Settings <#imports-and-settings>`__ diff --git a/docs/notebooks/401-object-detection-with-output.rst b/docs/notebooks/401-object-detection-with-output.rst index bc83f4a2af3462..45ee50e220e9df 100644 --- a/docs/notebooks/401-object-detection-with-output.rst +++ b/docs/notebooks/401-object-detection-with-output.rst @@ -1,7 +1,7 @@ Live Object Detection with OpenVINO™ ==================================== -.. _top: + This notebook demonstrates live object detection with OpenVINO, using the `SSDLite @@ -17,6 +17,8 @@ Additionally, you can also upload a video file. with a webcam. If you run the notebook on a server, the webcam will not work. However, you can still do inference on a video. +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/402-pose-estimation-with-output.rst b/docs/notebooks/402-pose-estimation-with-output.rst index fbee0c5e4708fa..efe0ffcdd5564c 100644 --- a/docs/notebooks/402-pose-estimation-with-output.rst +++ b/docs/notebooks/402-pose-estimation-with-output.rst @@ -1,7 +1,7 @@ Live Human Pose Estimation with OpenVINO™ ========================================= -.. _top: + This notebook demonstrates live pose estimation with OpenVINO, using the OpenPose @@ -18,6 +18,8 @@ Additionally, you can also upload a video file. work. However, you can still do inference on a video in the final step. +.. 
_top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/403-action-recognition-webcam-with-output.rst b/docs/notebooks/403-action-recognition-webcam-with-output.rst index d0cb4b74b57b00..d6755518701ca1 100644 --- a/docs/notebooks/403-action-recognition-webcam-with-output.rst +++ b/docs/notebooks/403-action-recognition-webcam-with-output.rst @@ -1,7 +1,7 @@ Human Action Recognition with OpenVINO™ ======================================= -.. _top: + This notebook demonstrates live human action recognition with OpenVINO, using the `Action Recognition @@ -39,6 +39,8 @@ Transformer and `ResNet34 `__. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/404-style-transfer-with-output.rst b/docs/notebooks/404-style-transfer-with-output.rst index 7c5d9c1022830d..630aca385b8d84 100644 --- a/docs/notebooks/404-style-transfer-with-output.rst +++ b/docs/notebooks/404-style-transfer-with-output.rst @@ -1,7 +1,7 @@ Style Transfer with OpenVINO™ ============================= -.. _top: + This notebook demonstrates style transfer with OpenVINO, using the Style Transfer Models from `ONNX Model @@ -32,6 +32,8 @@ Additionally, you can also upload a video file. but you can run inference, using a video file. +.. _top: + **Table of contents**: - `Preparation <#preparation>`__ diff --git a/docs/notebooks/405-paddle-ocr-webcam-with-output.rst b/docs/notebooks/405-paddle-ocr-webcam-with-output.rst index 608a9d4ab58f4d..8f11e078ae975e 100644 --- a/docs/notebooks/405-paddle-ocr-webcam-with-output.rst +++ b/docs/notebooks/405-paddle-ocr-webcam-with-output.rst @@ -1,7 +1,7 @@ PaddleOCR with OpenVINO™ ======================== -.. _top: + This demo shows how to run PP-OCR model on OpenVINO natively. Instead of exporting the PaddlePaddle model to ONNX and then converting to the @@ -25,6 +25,8 @@ the PaddleOCR is as follows: with a webcam. If you run the notebook on a server, the webcam will not work. You can still do inference on a video file. +.. _top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/notebooks/406-3D-pose-estimation-with-output.rst b/docs/notebooks/406-3D-pose-estimation-with-output.rst index 9038ce3098118c..121a5d44326cef 100644 --- a/docs/notebooks/406-3D-pose-estimation-with-output.rst +++ b/docs/notebooks/406-3D-pose-estimation-with-output.rst @@ -1,7 +1,7 @@ Live 3D Human Pose Estimation with OpenVINO =========================================== -.. _top: + This notebook demonstrates live 3D Human Pose Estimation with OpenVINO via a webcam. We utilize the model @@ -30,6 +30,8 @@ To ensure that the results are displayed correctly, run the code in a recommended browser on one of the following operating systems: Ubuntu, Windows: Chrome, macOS: Safari. +.. _top: + **Table of contents**: - `Prerequisites <#prerequisites>`__ diff --git a/docs/notebooks/407-person-tracking-with-output.rst b/docs/notebooks/407-person-tracking-with-output.rst index abc808bb273289..b267e6bd9ec6dc 100644 --- a/docs/notebooks/407-person-tracking-with-output.rst +++ b/docs/notebooks/407-person-tracking-with-output.rst @@ -1,7 +1,7 @@ Person Tracking with OpenVINO™ ============================== -.. _top: + This notebook demonstrates live person tracking with OpenVINO: it reads frames from an input video sequence, detects people in the frames, @@ -95,6 +95,8 @@ realtime tracking,” in ICIP, 2016, pp. 3464–3468. .. |deepsort| image:: https://user-images.githubusercontent.com/91237924/221744683-0042eff8-2c41-43b8-b3ad-b5929bafb60b.png +.. 
_top: + **Table of contents**: - `Imports <#imports>`__ diff --git a/docs/tutorials.md b/docs/tutorials.md index a4fa0ed98cb073..c21005bab47bd4 100644 --- a/docs/tutorials.md +++ b/docs/tutorials.md @@ -131,6 +131,15 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools. +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ | `120-tensorflow-object-detection-to-openvino `__ |br| |n120| |br| |c120| | Convert TensorFlow Object Detection models to OpenVINO IR | +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ + | `122-speech-recognition-quantization-wav2vec2 `__ | Quantize Speech Recognition Models with accuracy control using NNCF PTQ API. | + +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ + | `122-yolov8-quantization-with-accuracy-control `__ | Convert and Optimize YOLOv8 with OpenVINO™. | + +----------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------+ + + + + + Model Demos From 2f782b21318c74d43eaa63faf4c01022450131e4 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Mon, 4 Sep 2023 17:03:49 +0400 Subject: [PATCH 06/16] [GPU] Add permute primitive instead of manual copy for deconv weights (#19516) --- .../intel_gpu/src/plugin/ops/constant.cpp | 164 +++++------------- .../intel_gpu/src/plugin/ops/convolution.cpp | 6 +- 2 files changed, 47 insertions(+), 123 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp index 249059b577ccae..664098477e82aa 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp @@ -65,40 +65,60 @@ static cldnn::tensor getConstTensor(const ov::Shape constDims) { struct ConstProperties { bool needsBatchInterpretation; - bool swapOI; - bool hasGroupDimension; }; -static void createClDnnConstant(ProgramBuilder& p, const ov::Shape& constDims, const std::shared_ptr& op, const ConstProperties& props); +static void create_data(ProgramBuilder& p, const ov::Shape& constDims, const std::shared_ptr& op, const ConstProperties& props) { + cldnn::tensor constTensor = getConstTensor(constDims); + auto constFormat = cldnn::format::get_default_format(constDims.size()); + + if (props.needsBatchInterpretation) { + constTensor.batch[0] = static_cast(constTensor.count()); + constTensor.feature[0] = 1; + } + + // If constDims has a dimension = 0, then create tensor with single value + // TODO: check if dim=0 is a valid case + if (std::accumulate(constDims.begin(), constDims.end(), size_t(1), std::multiplies()) == 0) + constTensor = cldnn::tensor{1}; + + auto newDims = constDims; + cldnn::data_types out_dtype = 
cldnn::element_type_to_data_type(op->get_output_element_type(0)); + cldnn::layout constLayout = p.use_new_shape_infer() ? cldnn::layout(newDims, out_dtype, constFormat) : + cldnn::layout(out_dtype, constFormat, constTensor); + + cldnn::primitive_id initialconstPrimID = layer_type_name_ID(op); + cldnn::primitive_id constPrimID; + auto data = op->get_data_ptr(); + + auto bufIter = p.blobMemCache.find(std::make_pair(data, newDims)); + + if (bufIter != p.blobMemCache.end()) { + constPrimID = bufIter->second; + p.primitive_ids[initialconstPrimID] = constPrimID; + p.profiling_ids.push_back(initialconstPrimID); + } else { + GPU_DEBUG_LOG << "[" << initialconstPrimID << ": constant]" << std::endl; + cldnn::memory::ptr mem = p.get_engine().allocate_memory(constLayout, false); + auto& stream = p.get_engine().get_service_stream(); + cldnn::mem_lock lock{mem, stream}; + auto buf = lock.data(); + auto bufSize = constLayout.bytes_count(); + + std::memcpy(&buf[0], &data[0], bufSize); + p.add_primitive(*op, cldnn::data(initialconstPrimID, mem)); + p.blobMemCache[std::make_pair(data, newDims)] = initialconstPrimID; + constPrimID = initialconstPrimID; + } +} static void CreateConstantOp(ProgramBuilder& p, const std::shared_ptr& op) { ov::Shape constDims = op->get_shape(); auto constUsers = op->get_output_target_inputs(0); - size_t numConstUsers = constUsers.size(); std::unordered_map, ConstProperties> consts = { - {op, {false, false, false}} + {op, {false}} }; - // handleConvWeights function is executed when one of the constant users is ConvolutionBackpropData or GroupConvolutionBackpropData. - // In that case, we mark that constant's O and I dimensions need to be swapped. - auto handleConvWeights = [&op] (ov::Node* conv, std::unordered_map, ConstProperties>& consts, - size_t& numConstUsers, bool hasGroupDimension) { - // If constant has multiple users - create its copy and replace 'conv' weights with the copy. - // This is to make sure that dimension change doesn't break other users of the constant node. - // It is a shallow copy, but that's fine since in createClDnnConstant - // every constant created here, gets memcopied to a brand new cldnn::memory. 
- if (numConstUsers > 1) { - auto constant = std::make_shared(*(op.get())); - conv->input(1).replace_source_output(constant); - consts.insert({constant, {false, true, hasGroupDimension}}); - numConstUsers--; - } else { - consts[op].swapOI = true; - consts[op].hasGroupDimension = hasGroupDimension; - } - }; - auto is_binary_eltwise = [&] (ov::Node* op) -> bool { if (ov::op::util::is_binary_elementwise_arithmetic(op) || ov::op::util::is_binary_elementwise_logical(op) || @@ -152,10 +172,6 @@ static void CreateConstantOp(ProgramBuilder& p, const std::shared_ptr(outOp) || ov::is_type(outOp)) { consts[op].needsBatchInterpretation = constDims.size() == 1; - } else if (ov::is_type(outOp) && node.get_index() == 1) { - handleConvWeights(outOp, consts, numConstUsers, false); - } else if (ov::is_type(outOp) && node.get_index() == 1) { - handleConvWeights(outOp, consts, numConstUsers, true); } else if (ov::is_type(outOp) && node.get_index() == 1) { // PReLU slope tensor reshape policy // @@ -187,97 +203,7 @@ static void CreateConstantOp(ProgramBuilder& p, const std::shared_ptr& op, const ConstProperties& props) { - cldnn::tensor constTensor = getConstTensor(constDims); - auto constFormat = cldnn::format::get_default_format(constDims.size()); - - if (props.needsBatchInterpretation) { - constTensor.batch[0] = static_cast(constTensor.count()); - constTensor.feature[0] = 1; - } - - // If constDims has a dimension = 0, then create tensor with single value - // TODO: check if dim=0 is a valid case - if (std::accumulate(constDims.begin(), constDims.end(), size_t(1), std::multiplies()) == 0) - constTensor = cldnn::tensor{1}; - - // Swap O and I dimensions to match expected deconvolution weights format - size_t inputFeatureElements = 1; - size_t outputFeatureElements = 1; - size_t groups = 1; - auto newDims = constDims; - if (props.swapOI) { - size_t expected_min_rank = 2 + (props.hasGroupDimension ? 1 : 0); - if (expected_min_rank > constDims.size()) - OPENVINO_THROW("Invalid constant properties or shape"); - - if (props.hasGroupDimension) { - std::swap(newDims[2], newDims[1]); - inputFeatureElements = newDims[2]; - outputFeatureElements = newDims[1]; - groups = newDims[0]; - } else { - std::swap(newDims[1], newDims[0]); - inputFeatureElements = newDims[1]; - outputFeatureElements = newDims[0]; - groups = 1; - } - constTensor = getConstTensor(newDims); - } - - cldnn::data_types out_dtype = cldnn::element_type_to_data_type(op->get_output_element_type(0)); - cldnn::layout constLayout = p.use_new_shape_infer() ? 
cldnn::layout(newDims, out_dtype, constFormat) : - cldnn::layout(out_dtype, constFormat, constTensor); - - cldnn::primitive_id initialconstPrimID = layer_type_name_ID(op); - cldnn::primitive_id constPrimID; - auto data = op->get_data_ptr(); - - auto bufIter = p.blobMemCache.find(std::make_pair(data, newDims)); - - if (bufIter != p.blobMemCache.end()) { - constPrimID = bufIter->second; - p.primitive_ids[initialconstPrimID] = constPrimID; - p.profiling_ids.push_back(initialconstPrimID); - } else { - GPU_DEBUG_LOG << "[" << initialconstPrimID << ": constant]" << std::endl; - cldnn::memory::ptr mem = p.get_engine().allocate_memory(constLayout, false); - auto& stream = p.get_engine().get_service_stream(); - cldnn::mem_lock lock{mem, stream}; - auto buf = lock.data(); - auto bufSize = constLayout.bytes_count(); - - // Do actual weights reorder and change O and I channels order - if (props.swapOI) { - auto elementSize = cldnn::data_type_traits::size_of(constLayout.data_type); - size_t spatial_dim_off = props.hasGroupDimension ? 3 : 2; - size_t featureSize = elementSize; - for (size_t i = spatial_dim_off; i < constDims.size(); i++) { - featureSize *= constDims[i]; - } - - for (size_t g = 0; g < groups; g++) { - for (size_t i = 0; i < inputFeatureElements; i++) { - for (size_t o = 0; o < outputFeatureElements; o++) { - size_t outputShift = ((g*outputFeatureElements + o)*inputFeatureElements + i)*featureSize; - size_t inputShift = ((g*inputFeatureElements + i)*outputFeatureElements + o)*featureSize; - - for (size_t b = 0; b < featureSize; b++) { - buf[outputShift + b] = data[inputShift + b]; - } - } - } - } - } else { - std::memcpy(&buf[0], &data[0], bufSize); - } - p.add_primitive(*op, cldnn::data(initialconstPrimID, mem)); - p.blobMemCache[std::make_pair(data, newDims)] = initialconstPrimID; - constPrimID = initialconstPrimID; + create_data(p, constDims, it.first, it.second); } } diff --git a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp index 792b906bb66245..64704964ab4f3e 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp @@ -114,12 +114,11 @@ static void CreateConvolutionBackpropDataOp(ProgramBuilder& p, const std::shared auto weightsName = inputs[1]; auto weights_node = op->get_input_node_shared_ptr(1); - bool hasConstantWeights = IsNodeOnConstPath(weights_node); // WA: For the cases like Const(weights)->Sub(zp)->Deconv. And also for the cases with real runtime weights. // Dimensions order of weights blob is IOYX, but // the selected format is OIYX by default. So we need to swap (and transpose) I and O dimensions to match the format // For Constant node on input transpose is not needed, because the data is transposed on const node creation - if ((hasConstantWeights && std::dynamic_pointer_cast(weights_node) == nullptr) || !hasConstantWeights) { + { std::string permuteName = layerName + "_cldnn_weights_permute"; auto weights_rank = op->get_input_shape(1).size(); std::vector permute_order(weights_rank); @@ -204,12 +203,11 @@ static void CreateGroupConvolutionBackpropDataOp(ProgramBuilder& p, const std::s auto weightsName = inputs[1]; auto weights_node = op->get_input_node_shared_ptr(1); - bool hasConstWeights = IsNodeOnConstPath(weights_node); // WA: For the cases like Const(weights)->Sub(zp)->Deconv. And also for the cases with real runtime weights. // Dimensions order of weights blob is IOYX, but // the selected format is OIYX by default. 
So we need to swap I and O dimensions to match the format. // For Constant node on input transpose is not needed, because the data is transposed on const node creation - if ((hasConstWeights && std::dynamic_pointer_cast(weights_node) == nullptr) || !hasConstWeights) { + { std::string permuteName = layerName + "_cldnn_weights_permute"; auto weights_rank = op->get_input_shape(1).size(); std::vector permute_order(weights_rank); From 3677dda457d4b1981dbf62026f61ae8c6e7ec45b Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Mon, 4 Sep 2023 15:40:02 +0200 Subject: [PATCH 07/16] [DOCS] 23.0 to 23.1 link update for master (#19584) * 2023.1 link fix * 2023.1 link fix * 2023.1 link fix * 2023.1 link fix * 2023.1 link fix --- README.md | 29 ++++++----- docs/IE_PLUGIN_DG/Intro.md | 4 +- docs/IE_PLUGIN_DG/dev_api_references.md | 4 +- .../Convert_RetinaNet_From_Tensorflow.md | 2 +- .../integrate_with_your_application.md | 4 +- docs/OV_Runtime_UG/ov_dynamic_shapes.md | 4 +- .../preprocessing_usecase_save.md | 6 +-- docs/benchmarks/performance_benchmarks.md | 2 +- .../cmake_options_for_custom_compilation.md | 4 +- docs/dev/debug_capabilities.md | 2 +- docs/gapi/face_beautification.md | 4 +- docs/gapi/gapi_face_analytics_pipeline.md | 8 +-- docs/home.rst | 8 +-- .../install_guides/installing-openvino-pip.md | 8 +-- docs/install_guides/pypi-openvino-dev.md | 2 +- docs/install_guides/pypi-openvino-rt.md | 4 +- .../notebooks/001-hello-world-with-output.rst | 2 +- .../002-openvino-api-with-output.rst | 8 +-- .../003-hello-segmentation-with-output.rst | 2 +- .../004-hello-detection-with-output.rst | 2 +- ...classification-to-openvino-with-output.rst | 12 ++--- ...2-pytorch-onnx-to-openvino-with-output.rst | 8 +-- .../102-pytorch-to-openvino-with-output.rst | 4 +- ...to-openvino-classification-with-output.rst | 8 +-- .../notebooks/104-model-tools-with-output.rst | 4 +- ...105-language-quantize-bert-with-output.rst | 2 +- .../notebooks/106-auto-device-with-output.rst | 14 +++--- ...tion-quantization-data2vec-with-output.rst | 4 +- docs/notebooks/108-gpu-device-with-output.rst | 42 ++++++++-------- .../109-latency-tricks-with-output.rst | 6 +-- .../109-throughput-tricks-with-output.rst | 8 +-- ...110-ct-scan-live-inference-with-output.rst | 4 +- ...segmentation-quantize-nncf-with-output.rst | 4 +- ...ov5-quantization-migration-with-output.rst | 10 ++-- ...training-quantization-nncf-with-output.rst | 6 +-- ...lassification-quantization-with-output.rst | 6 +-- docs/notebooks/115-async-api-with-output.rst | 2 +- .../116-sparsity-optimization-with-output.rst | 6 +-- .../117-model-server-with-output.rst | 4 +- ...118-optimize-preprocessing-with-output.rst | 20 ++++---- .../119-tflite-to-openvino-with-output.rst | 12 ++--- ...ject-detection-to-openvino-with-output.rst | 11 ++-- .../121-convert-to-openvino-with-output.rst | 50 +++++++++---------- .../201-vision-monodepth-with-output.rst | 2 +- ...sion-superresolution-image-with-output.rst | 2 +- ...sion-superresolution-video-with-output.rst | 2 +- .../203-meter-reader-with-output.rst | 2 +- ...nter-semantic-segmentation-with-output.rst | 4 +- ...-vision-background-removal-with-output.rst | 4 +- ...206-vision-paddlegan-anime-with-output.rst | 6 +-- ...ical-character-recognition-with-output.rst | 8 +-- .../209-handwritten-ocr-with-output.rst | 4 +- .../215-image-inpainting-with-output.rst | 4 +- .../216-attention-center-with-output.rst | 2 +- .../217-vision-deblur-with-output.rst | 4 +- ...219-knowledge-graphs-conve-with-output.rst | 6 +-- 
...ss-lingual-books-alignment-with-output.rst | 20 ++++---- ...-vision-image-colorization-with-output.rst | 4 +- .../223-text-prediction-with-output.rst | 2 +- ...-segmentation-point-clouds-with-output.rst | 2 +- .../226-yolov7-optimization-with-output.rst | 2 +- ...rt-sequence-classification-with-output.rst | 6 +-- .../230-yolov8-optimization-with-output.rst | 5 +- ...clip-language-saliency-map-with-output.rst | 2 +- ...ontrolnet-stable-diffusion-with-output.rst | 2 +- ...diffusion-v2-infinite-zoom-with-output.rst | 8 +-- ...-diffusion-v2-optimum-demo-with-output.rst | 2 +- ...diffusion-v2-text-to-image-with-output.rst | 2 +- .../237-segment-anything-with-output.rst | 2 +- .../238-deep-floyd-if-with-output.rst | 2 +- .../239-image-bind-convert-with-output.rst | 2 +- ...42-freevc-voice-conversion-with-output.rst | 2 +- ...tflite-selfie-segmentation-with-output.rst | 4 +- .../245-typo-detector-with-output.rst | 10 ++-- ...6-depth-estimation-videpth-with-output.rst | 4 +- .../247-code-language-id-with-output.rst | 4 +- .../250-music-generation-with-output.rst | 4 +- ...low-training-openvino-nncf-with-output.rst | 10 ++-- ...nsorflow-training-openvino-with-output.rst | 2 +- ...uantization-aware-training-with-output.rst | 4 +- ...uantization-aware-training-with-output.rst | 4 +- .../401-object-detection-with-output.rst | 2 +- .../406-3D-pose-estimation-with-output.rst | 2 +- .../407-person-tracking-with-output.rst | 8 +-- .../dldt_deployment_optimization_common.md | 2 +- docs/resources/prerelease_information.md | 2 +- .../hello_nv12_input_classification/README.md | 2 +- .../classification_sample_async/README.md | 10 ++-- samples/python/hello_classification/README.md | 28 +++++------ samples/python/hello_query_device/README.md | 8 +-- samples/python/hello_reshape_ssd/README.md | 8 +-- .../python/model_creation_sample/README.md | 24 ++++----- samples/python/speech_sample/README.md | 12 ++--- src/README.md | 2 +- src/bindings/c/README.md | 8 +-- .../how_to_wrap_openvino_interfaces_with_c.md | 2 +- .../how_to_wrap_openvino_objects_with_c.md | 2 +- src/bindings/c/docs/how_to_write_unit_test.md | 2 +- src/plugins/auto/docs/integration.md | 2 +- src/plugins/proxy/README.md | 2 +- tools/pot/README.md | 10 ++-- tools/pot/docs/ModelRepresentation.md | 2 +- 102 files changed, 327 insertions(+), 330 deletions(-) diff --git a/README.md b/README.md index 86c3604d2b541a..844e44a0ace216 100644 --- a/README.md +++ b/README.md @@ -68,24 +68,24 @@ The OpenVINO™ Runtime can infer models on different hardware devices. 
This sec CPU - Intel CPU + Intel CPU openvino_intel_cpu_plugin Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE) - ARM CPU + ARM CPU openvino_arm_cpu_plugin Raspberry Pi™ 4 Model B, Apple® Mac mini with M1 chip, NVIDIA® Jetson Nano™, Android™ devices GPU - Intel GPU + Intel GPU openvino_intel_gpu_plugin Intel Processor Graphics, including Intel HD Graphics and Intel Iris Graphics GNA - Intel GNA + Intel GNA openvino_intel_gna_plugin Intel Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel Pentium Silver J5005 Processor, Intel Pentium Silver N5000 Processor, Intel Celeron J4005 Processor, Intel Celeron J4105 Processor, Intel Celeron Processor N4100, Intel Celeron Processor N4000, Intel Core i3-8121U Processor, Intel Core i7-1065G7 Processor, Intel Core i7-1060G7 Processor, Intel Core i5-1035G4 Processor, Intel Core i5-1035G7 Processor, Intel Core i5-1035G1 Processor, Intel Core i5-1030G7 Processor, Intel Core i5-1030G4 Processor, Intel Core i3-1005G1 Processor, Intel Core i3-1000G1 Processor, Intel Core i3-1000G4 Processor @@ -103,22 +103,22 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models - Auto + Auto openvino_auto_plugin Auto plugin enables selecting Intel device for inference automatically - Auto Batch + Auto Batch openvino_auto_batch_plugin Auto batch plugin performs on-the-fly automatic batching (i.e. grouping inference requests together) to improve device utilization, with no programming effort from the user - Hetero + Hetero openvino_hetero_plugin Heterogeneous execution enables automatic inference splitting between several devices - Multi + Multi openvino_auto_plugin Multi plugin enables simultaneous inference of the same model on several devices in parallel @@ -155,10 +155,9 @@ The list of OpenVINO tutorials: ## System requirements The system requirements vary depending on platform and are available on dedicated pages: -- [Linux](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_linux_header.html) -- [Windows](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_windows_header.html) -- [macOS](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_macos_header.html) -- [Raspbian](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_raspbian.html) +- [Linux](https://docs.openvino.ai/2023.1/openvino_docs_install_guides_installing_openvino_linux_header.html) +- [Windows](https://docs.openvino.ai/2023.1/openvino_docs_install_guides_installing_openvino_windows_header.html) +- [macOS](https://docs.openvino.ai/2023.1/openvino_docs_install_guides_installing_openvino_macos_header.html) ## How to build @@ -196,7 +195,7 @@ Report questions, issues and suggestions, using: \* Other names and brands may be claimed as the property of others. 
[Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo -[OpenVINO™ Runtime]:https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_OV_Runtime_User_Guide.html -[Model Optimizer]:https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html -[Post-Training Optimization Tool]:https://docs.openvino.ai/2023.0/pot_introduction.html +[OpenVINO™ Runtime]:https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html +[Model Optimizer]:https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html +[Post-Training Optimization Tool]:https://docs.openvino.ai/2023.1/pot_introduction.html [Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples diff --git a/docs/IE_PLUGIN_DG/Intro.md b/docs/IE_PLUGIN_DG/Intro.md index 80c01aa8af80e1..c33cf9589b372d 100644 --- a/docs/IE_PLUGIN_DG/Intro.md +++ b/docs/IE_PLUGIN_DG/Intro.md @@ -94,7 +94,7 @@ Detailed Guides API References ############## -* `OpenVINO Plugin API `__ -* `OpenVINO Transformation API `__ +* `OpenVINO Plugin API `__ +* `OpenVINO Transformation API `__ @endsphinxdirective diff --git a/docs/IE_PLUGIN_DG/dev_api_references.md b/docs/IE_PLUGIN_DG/dev_api_references.md index 5e6cc22c57d3e7..aa1a2a464079ee 100644 --- a/docs/IE_PLUGIN_DG/dev_api_references.md +++ b/docs/IE_PLUGIN_DG/dev_api_references.md @@ -15,7 +15,7 @@ The guides below provides extra API references needed for OpenVINO plugin development: -* `OpenVINO Plugin API `__ -* `OpenVINO Transformation API `__ +* `OpenVINO Plugin API `__ +* `OpenVINO Transformation API `__ @endsphinxdirective diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md index 908135935e0371..77da5d31049ca6 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md @@ -10,7 +10,7 @@ This tutorial explains how to convert a RetinaNet model to the Intermediate Representation (IR). `Public RetinaNet model `__ does not contain pretrained TensorFlow weights. -To convert this model to the TensorFlow format, follow the `Reproduce Keras to TensorFlow Conversion tutorial `__. +To convert this model to the TensorFlow format, follow the `Reproduce Keras to TensorFlow Conversion tutorial `__. After converting the model to TensorFlow format, run the following command: diff --git a/docs/OV_Runtime_UG/integrate_with_your_application.md b/docs/OV_Runtime_UG/integrate_with_your_application.md index 08ff6c8f3f4d45..6a024ff13bbe63 100644 --- a/docs/OV_Runtime_UG/integrate_with_your_application.md +++ b/docs/OV_Runtime_UG/integrate_with_your_application.md @@ -437,9 +437,9 @@ To build your project using CMake with the default build tools currently availab Additional Resources #################### -* See the :doc:`OpenVINO Samples ` page or the `Open Model Zoo Demos `__ page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. +* See the :doc:`OpenVINO Samples ` page or the `Open Model Zoo Demos `__ page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. 
* :doc:`OpenVINO™ Runtime Preprocessing ` * :doc:`Using Encrypted Models with OpenVINO ` -* `Open Model Zoo Demos `__ +* `Open Model Zoo Demos `__ @endsphinxdirective diff --git a/docs/OV_Runtime_UG/ov_dynamic_shapes.md b/docs/OV_Runtime_UG/ov_dynamic_shapes.md index 782029223db7ea..f318105a40b4a8 100644 --- a/docs/OV_Runtime_UG/ov_dynamic_shapes.md +++ b/docs/OV_Runtime_UG/ov_dynamic_shapes.md @@ -62,7 +62,7 @@ Model input dimensions can be specified as dynamic using the model.reshape metho Some models may already have dynamic shapes out of the box and do not require additional configuration. This can either be because it was generated with dynamic shapes from the source framework, or because it was converted with Model Conversion API to use dynamic shapes. For more information, see the Dynamic Dimensions “Out of the Box” section. -The examples below show how to set dynamic dimensions with a model that has a static ``[1, 3, 224, 224]`` input shape (such as `mobilenet-v2 `__). The first example shows how to change the first dimension (batch size) to be dynamic. In the second example, the third and fourth dimensions (height and width) are set as dynamic. +The examples below show how to set dynamic dimensions with a model that has a static ``[1, 3, 224, 224]`` input shape (such as `mobilenet-v2 `__). The first example shows how to change the first dimension (batch size) to be dynamic. In the second example, the third and fourth dimensions (height and width) are set as dynamic. .. tab-set:: @@ -175,7 +175,7 @@ The lower and/or upper bounds of a dynamic dimension can also be specified. They .. tab-item:: C :sync: c - The dimension bounds can be coded as arguments for `ov_dimension `__, as shown in these examples: + The dimension bounds can be coded as arguments for `ov_dimension `__, as shown in these examples: .. 
doxygensnippet:: docs/snippets/ov_dynamic_shapes.c :language: cpp diff --git a/docs/OV_Runtime_UG/preprocessing_usecase_save.md b/docs/OV_Runtime_UG/preprocessing_usecase_save.md index b52c063c7fb055..71de4a7e5cc82f 100644 --- a/docs/OV_Runtime_UG/preprocessing_usecase_save.md +++ b/docs/OV_Runtime_UG/preprocessing_usecase_save.md @@ -110,8 +110,8 @@ Additional Resources * :doc:`Layout API overview ` * :doc:`Model Optimizer - Optimize Preprocessing Computation ` * :doc:`Model Caching Overview ` -* The `ov::preprocess::PrePostProcessor `__ C++ class documentation -* The `ov::pass::Serialize `__ - pass to serialize model to XML/BIN -* The `ov::set_batch `__ - update batch dimension for a given model +* The `ov::preprocess::PrePostProcessor `__ C++ class documentation +* The `ov::pass::Serialize `__ - pass to serialize model to XML/BIN +* The `ov::set_batch `__ - update batch dimension for a given model @endsphinxdirective diff --git a/docs/benchmarks/performance_benchmarks.md b/docs/benchmarks/performance_benchmarks.md index fc2b582dd38393..9c95329e184726 100644 --- a/docs/benchmarks/performance_benchmarks.md +++ b/docs/benchmarks/performance_benchmarks.md @@ -13,7 +13,7 @@ openvino_docs_performance_benchmarks_faq OpenVINO Accuracy - Performance Data Spreadsheet (download xlsx) + Performance Data Spreadsheet (download xlsx) openvino_docs_MO_DG_Getting_Performance_Numbers diff --git a/docs/dev/cmake_options_for_custom_compilation.md b/docs/dev/cmake_options_for_custom_compilation.md index 5142645d8bcf32..1b4f3b7eb5752b 100644 --- a/docs/dev/cmake_options_for_custom_compilation.md +++ b/docs/dev/cmake_options_for_custom_compilation.md @@ -189,8 +189,8 @@ In this case OpenVINO CMake scripts take `TBBROOT` environment variable into acc [pugixml]:https://pugixml.org/ [ONNX]:https://onnx.ai/ [protobuf]:https://github.com/protocolbuffers/protobuf -[deployment manager]:https://docs.openvino.ai/2023.0/openvino_docs_install_guides_deployment_manager_tool.html -[OpenVINO Runtime Introduction]:https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Integrate_OV_with_your_application.html +[deployment manager]:https://docs.openvino.ai/2023.1/openvino_docs_install_guides_deployment_manager_tool.html +[OpenVINO Runtime Introduction]:https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Integrate_OV_with_your_application.html [PDPD]:https://github.com/PaddlePaddle/Paddle [TensorFlow]:https://www.tensorflow.org/ [TensorFlow Lite]:https://www.tensorflow.org/lite diff --git a/docs/dev/debug_capabilities.md b/docs/dev/debug_capabilities.md index c576cd9879ff5c..52c19eacdd2cc2 100644 --- a/docs/dev/debug_capabilities.md +++ b/docs/dev/debug_capabilities.md @@ -2,7 +2,7 @@ OpenVINO components provides different debug capabilities, to get more information please read: -* [OpenVINO Model Debug Capabilities](https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities) +* [OpenVINO Model Debug Capabilities](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities) * [OpenVINO Pass Manager Debug Capabilities](#todo) ## See also diff --git a/docs/gapi/face_beautification.md b/docs/gapi/face_beautification.md index b5201f19561b9f..5b11a5c45e69d1 100644 --- a/docs/gapi/face_beautification.md +++ b/docs/gapi/face_beautification.md @@ -24,8 +24,8 @@ This sample requires: * OpenCV 4.2 or higher built with `Intel® Distribution of OpenVINO™ Toolkit `__ (building with `Intel® TBB `__ is a plus) * The following pre-trained models from the 
:doc:`Open Model Zoo ` - * `face-detection-adas-0001 `__ - * `facial-landmarks-35-adas-0002 `__ + * `face-detection-adas-0001 `__ + * `facial-landmarks-35-adas-0002 `__ To download the models from the Open Model Zoo, use the :doc:`Model Downloader ` tool. diff --git a/docs/gapi/gapi_face_analytics_pipeline.md b/docs/gapi/gapi_face_analytics_pipeline.md index e1c25c7d134a73..dc34bcbb03ba7b 100644 --- a/docs/gapi/gapi_face_analytics_pipeline.md +++ b/docs/gapi/gapi_face_analytics_pipeline.md @@ -24,9 +24,9 @@ This sample requires: * OpenCV 4.2 or higher built with `Intel® Distribution of OpenVINO™ Toolkit `__ (building with `Intel® TBB `__ is a plus) * The following pre-trained models from the :doc:`Open Model Zoo ` - * `face-detection-adas-0001 `__ - * `age-gender-recognition-retail-0013 `__ - * `emotions-recognition-retail-0003 `__ + * `face-detection-adas-0001 `__ + * `age-gender-recognition-retail-0013 `__ + * `emotions-recognition-retail-0003 `__ To download the models from the Open Model Zoo, use the :doc:`Model Downloader ` tool. @@ -42,7 +42,7 @@ Starting with version 4.2, OpenCV offers a solution to this problem. OpenCV G-AP Pipeline Overview ################# -Our sample application is based on `Interactive Face Detection `__ demo from Open Model Zoo. A simplified pipeline consists of the following steps: +Our sample application is based on `Interactive Face Detection `__ demo from Open Model Zoo. A simplified pipeline consists of the following steps: 1. Image acquisition and decode 2. Detection with preprocessing diff --git a/docs/home.rst b/docs/home.rst index 0fd9241372e952..02e94efe76efdb 100644 --- a/docs/home.rst +++ b/docs/home.rst @@ -23,10 +23,10 @@ OpenVINO 2023.0
diff --git a/docs/install_guides/installing-openvino-pip.md b/docs/install_guides/installing-openvino-pip.md index 3d586984c9d40d..00413eeae3d762 100644 --- a/docs/install_guides/installing-openvino-pip.md +++ b/docs/install_guides/installing-openvino-pip.md @@ -111,16 +111,16 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 -Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. +Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. Get started with Python +++++++++++++++++++++++ Visit the :doc:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: -* `OpenVINO Python API Tutorial `__ -* `Basic image classification program with Hello Image Classification `__ -* `Convert a PyTorch model and use it for image background removal `__ +* `OpenVINO Python API Tutorial `__ +* `Basic image classification program with Hello Image Classification `__ +* `Convert a PyTorch model and use it for image background removal `__ Run OpenVINO on accelerated devices +++++++++++++++++++++++++++++++++++ diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/install_guides/pypi-openvino-dev.md index 1826a5f3b2a922..df7568f9a179bf 100644 --- a/docs/install_guides/pypi-openvino-dev.md +++ b/docs/install_guides/pypi-openvino-dev.md @@ -127,7 +127,7 @@ For example, to install and configure the components for working with TensorFlow ## Troubleshooting -For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.0/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. +For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.1/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. ### Errors with Installing via PIP for Users in China diff --git a/docs/install_guides/pypi-openvino-rt.md b/docs/install_guides/pypi-openvino-rt.md index 88cfccf4bb43a5..2007e88dc1a4b3 100644 --- a/docs/install_guides/pypi-openvino-rt.md +++ b/docs/install_guides/pypi-openvino-rt.md @@ -5,7 +5,7 @@ Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. -If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™ Runtime](https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. +If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. 
The [OpenVINO™ Runtime](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. ## System Requirements @@ -72,7 +72,7 @@ If installation was successful, you will see the list of available devices. ## Troubleshooting -For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.0/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. +For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.1/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. ### Errors with Installing via PIP for Users in China diff --git a/docs/notebooks/001-hello-world-with-output.rst b/docs/notebooks/001-hello-world-with-output.rst index b5fc484fc044c1..797cc193e53221 100644 --- a/docs/notebooks/001-hello-world-with-output.rst +++ b/docs/notebooks/001-hello-world-with-output.rst @@ -7,7 +7,7 @@ This basic introduction to OpenVINO™ shows how to do inference with an image classification model. A pre-trained `MobileNetV3 -model `__ +model `__ from `Open Model Zoo `__ is used in this tutorial. For more information about how OpenVINO IR models are diff --git a/docs/notebooks/002-openvino-api-with-output.rst b/docs/notebooks/002-openvino-api-with-output.rst index ef4eca621d258d..d66cee6545282f 100644 --- a/docs/notebooks/002-openvino-api-with-output.rst +++ b/docs/notebooks/002-openvino-api-with-output.rst @@ -104,7 +104,7 @@ After initializing OpenVINO Runtime, first read the model file with ``compile_model()`` method. `OpenVINO™ supports several model -formats `__ +formats `__ and enables developers to convert them to its own OpenVINO IR format using a tool dedicated to this task. @@ -123,7 +123,7 @@ file has a different filename, it can be specified using the ``weights`` parameter in ``read_model()``. The OpenVINO `model conversion -API `__ +API `__ tool is used to convert models to OpenVINO IR format. Model conversion API reads the original model and creates an OpenVINO IR model (``.xml`` and ``.bin`` files) so inference can be performed without delays due to @@ -299,7 +299,7 @@ TensorFlow models saved in frozen graph format can also be passed to support will be provided in the upcoming 2023 releases. Currently support is limited to only frozen graph inference format. Other TensorFlow model formats must be converted to OpenVINO IR using - `model conversion API `__. + `model conversion API `__. .. code:: ipython3 @@ -573,7 +573,7 @@ Doing Inference on a Model The diagram below shows a typical inference pipeline with OpenVINO -.. figure:: https://docs.openvino.ai/2023.0/_images/IMPLEMENT_PIPELINE_with_API_C.svg +.. figure:: https://docs.openvino.ai/2023.1/_images/IMPLEMENT_PIPELINE_with_API_C.svg :alt: image.png image.png diff --git a/docs/notebooks/003-hello-segmentation-with-output.rst b/docs/notebooks/003-hello-segmentation-with-output.rst index de8e1d16974252..d745565da2c0aa 100644 --- a/docs/notebooks/003-hello-segmentation-with-output.rst +++ b/docs/notebooks/003-hello-segmentation-with-output.rst @@ -6,7 +6,7 @@ Hello Image Segmentation A very basic introduction to using segmentation models with OpenVINO™. 
In this tutorial, a pre-trained -`road-segmentation-adas-0001 `__ +`road-segmentation-adas-0001 `__ model from the `Open Model Zoo `__ is used. ADAS stands for Advanced Driver Assistance Services. The model diff --git a/docs/notebooks/004-hello-detection-with-output.rst b/docs/notebooks/004-hello-detection-with-output.rst index 8a96d8e68f06f6..b5b1a183e6072c 100644 --- a/docs/notebooks/004-hello-detection-with-output.rst +++ b/docs/notebooks/004-hello-detection-with-output.rst @@ -7,7 +7,7 @@ A very basic introduction to using object detection models with OpenVINO™. The -`horizontal-text-detection-0001 `__ +`horizontal-text-detection-0001 `__ model from `Open Model Zoo `__ is used. It detects horizontal text in images and returns a blob of data in the diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst index 6971a252d7d530..50b81fc51eade5 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst @@ -4,13 +4,13 @@ Convert a TensorFlow Model to OpenVINO™ | This short tutorial shows how to convert a TensorFlow - `MobileNetV3 `__ + `MobileNetV3 `__ image classification model to OpenVINO `Intermediate - Representation `__ + Representation `__ (OpenVINO IR) format, using `model conversion - API `__. + API `__. After creating the OpenVINO IR, load the model in `OpenVINO - Runtime `__ + Runtime `__ and do inference with a sample image. @@ -140,7 +140,7 @@ model directory and returns OpenVINO Model class instance which represents this model. Obtained model is ready to use and to be loaded on a device using ``compile_model`` or can be saved on a disk using the ``serialize`` function. See the -`tutorial `__ +`tutorial `__ for more information about using model conversion API with TensorFlow models. @@ -273,7 +273,7 @@ Timing `⇑ <#top>`__ Measure the time it takes to do inference on thousand images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__ +Tool `__ in OpenVINO. Note that many optimizations are possible to improve the performance. diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index 4ff0c24ecd7c2a..ac8ce1e7bdf452 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -210,7 +210,7 @@ Convert ONNX Model to OpenVINO IR Format `⇑ <#top>`__ To convert the ONNX model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved inside the current directory. For more information on how to convert models, see this -`page `__. +`page `__. .. code:: ipython3 @@ -452,7 +452,7 @@ Performance Comparison `⇑ <#top>`__ Measure the time it takes to do inference on twenty images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__. +Tool `__. Keep in mind that many optimizations are possible to improve the performance. 
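The 102-pytorch-onnx notebook text above describes converting the exported ONNX model to OpenVINO IR with model conversion API and then comparing performance. A minimal sketch of that conversion step, assuming a local ``model.onnx`` and the ``openvino.tools.mo`` Python API referenced elsewhere in this patch (the notebook's exact code is not reproduced here):

.. code:: python

   from openvino.tools.mo import convert_model
   from openvino.runtime import Core, serialize

   # Convert the ONNX model to an in-memory OpenVINO Model; compress_to_fp16 is
   # assumed to be the Python counterpart of the --compress_to_fp16 CLI option.
   ov_model = convert_model("model.onnx", compress_to_fp16=True)

   # Save the IR so it can later be fed to benchmark_app for measurements.
   serialize(ov_model, "model/model.xml")

   # Quick sanity check: compile on CPU and confirm the model loads.
   compiled = Core().compile_model(ov_model, "CPU")
   print(compiled.inputs, compiled.outputs)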
@@ -549,6 +549,6 @@ References `⇑ <#top>`__ - `OpenVINO ONNX support `__ - `Model Conversion API - documentation `__ + documentation `__ - `Converting Pytorch - model `__ + model `__ diff --git a/docs/notebooks/102-pytorch-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-to-openvino-with-output.rst index be0a9038b08091..d2b0b57549bff5 100644 --- a/docs/notebooks/102-pytorch-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-to-openvino-with-output.rst @@ -239,7 +239,7 @@ Starting from the 2023.0 release OpenVINO supports direct PyTorch models conversion to OpenVINO Intermediate Representation (IR) format. Model Optimizer Python API should be used for these purposes. More details regarding PyTorch model conversion can be found in OpenVINO -`documentation `__ +`documentation `__ .. note:: @@ -268,7 +268,7 @@ parameters, such as: and any other advanced options supported by model conversion Python API. More details can be found on this -`page `__ +`page `__ .. code:: ipython3 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst index 94f284cf6741ed..5c8a9fefd2b88d 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst @@ -267,7 +267,7 @@ accept path to PaddlePaddle model and returns OpenVINO Model class instance which represents this model. Obtained model is ready to use and loading on device using ``compile_model`` or can be saved on disk using ``serialize`` function. See the `Model Optimizer Developer -Guide `__ +Guide `__ for more information about Model Optimizer. .. code:: ipython3 @@ -368,7 +368,7 @@ Measure the time it takes to do inference on fifty images and compare the result. The timing information gives an indication of performance. For a fair comparison, we include the time it takes to process the image. For more accurate benchmarking, use the `OpenVINO benchmark -tool `__. +tool `__. Note that many optimizations are possible to improve the performance. .. code:: ipython3 @@ -498,6 +498,6 @@ References `⇑ <#top>`__ - `PaddleClas `__ - `OpenVINO PaddlePaddle - support `__ + support `__ - `OpenVINO Model Optimizer - Documentation `__ + Documentation `__ diff --git a/docs/notebooks/104-model-tools-with-output.rst b/docs/notebooks/104-model-tools-with-output.rst index 441028017b47b5..47cd5fd7e26980 100644 --- a/docs/notebooks/104-model-tools-with-output.rst +++ b/docs/notebooks/104-model-tools-with-output.rst @@ -212,9 +212,9 @@ Converting mobilenet-v2-pytorch… Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=/tmp/tmp3q4nxrwu --model_name=mobilenet-v2-pytorch --input=data '--mean_values=data[123.675,116.28,103.53]' '--scale_values=data[58.624,57.12,57.375]' --reverse_input_channels --output=prob --input_model=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 224, 224]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression by removing argument --compress_to_fp16 or set it to false --compress_to_fp16=False. 
- Find more information about compression to FP16 at https://docs.openvino.ai/latest/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /tmp/tmp3q4nxrwu/mobilenet-v2-pytorch.xml [ SUCCESS ] BIN file: /tmp/tmp3q4nxrwu/mobilenet-v2-pytorch.bin diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index c7cdfb210868e0..61c8d152e6bcef 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -594,7 +594,7 @@ Frames Per Second (FPS) for images. Finally, measure the inference performance of OpenVINO ``FP32`` and ``INT8`` models. For this purpose, use -`Benchmark Tool `__ +`Benchmark Tool `__ in OpenVINO. .. note:: diff --git a/docs/notebooks/106-auto-device-with-output.rst b/docs/notebooks/106-auto-device-with-output.rst index 98166495d23c74..b1e37e02f7a376 100644 --- a/docs/notebooks/106-auto-device-with-output.rst +++ b/docs/notebooks/106-auto-device-with-output.rst @@ -2,19 +2,19 @@ Automatic Device Selection with OpenVINO™ ========================================= The `Auto -device `__ +device `__ (or AUTO in short) selects the most suitable device for inference by considering the model precision, power efficiency and processing capability of the available `compute -devices `__. +devices `__. The model precision (such as ``FP32``, ``FP16``, ``INT8``, etc.) is the first consideration to filter out the devices that cannot run the network efficiently. Next, if dedicated accelerators are available, these devices are preferred (for example, integrated and discrete -`GPU `__). -`CPU `__ +`GPU `__). +`CPU `__ is used as the default “fallback device”. Keep in mind that AUTO makes this selection only once, during the loading of a model. @@ -100,7 +100,7 @@ with ``openvino.runtime.Core().compile_model`` or serialized for next usage with ``openvino.runtime.serialize``. For more information about model conversion API, see this -`page `__. +`page `__. .. code:: ipython3 @@ -312,8 +312,8 @@ hints do not require any device-specific settings and they are completely portable between devices – meaning AUTO can configure the performance hint on whichever device is being used. -For more information, refer to the `Performance Hints `__ -section of `Automatic Device Selection `__ +For more information, refer to the `Performance Hints `__ +section of `Automatic Device Selection `__ article. 
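The 106-auto-device changes above describe AUTO device selection with portable performance hints. A minimal sketch, assuming an already converted IR file ``model.xml``; the hint values follow the standard OpenVINO configuration keys:

.. code:: python

   from openvino.runtime import Core

   core = Core()
   model = core.read_model("model.xml")

   # AUTO picks the most suitable device; the hint stays valid whichever
   # device ends up being selected.
   compiled_latency = core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": "LATENCY"})
   compiled_throughput = core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": "THROUGHPUT"})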
Class and callback definition `⇑ <#top>`__ diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 39cf07b445206f..313abe78024581 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -347,7 +347,7 @@ steps: 1. Create a Dataset for quantization. 2. Run ``nncf.quantize`` for getting an optimized model. The ``nncf.quantize`` function provides an interface for model quantization. It requires an instance of the OpenVINO Model and quantization dataset. Optionally, some additional parameters for the configuration quantization process (number of samples for quantization, preset, ignored scope, etc.) can be provided. For more accurate results, we should keep the operation in the postprocessing subgraph in floating point precision, using the ``ignored_scope`` parameter. ``advanced_parameters`` can be used to specify advanced quantization parameters for fine-tuning the quantization algorithm. In this tutorial we pass range estimator parameters for activations. For more information see -`Tune quantization parameters `__. +`Tune quantization parameters `__. 3. Serialize OpenVINO IR model using ``openvino.runtime.serialize`` function. .. code:: ipython3 @@ -661,7 +661,7 @@ Compare Performance of the Original and Quantized Models `⇑ <#top>`__ ############################################################################################################################### `Benchmark -Tool `__ +Tool `__ is used to measure the inference performance of the ``FP16`` and ``INT8`` models. diff --git a/docs/notebooks/108-gpu-device-with-output.rst b/docs/notebooks/108-gpu-device-with-output.rst index 9d7f69faec7efe..fc236c92b8331e 100644 --- a/docs/notebooks/108-gpu-device-with-output.rst +++ b/docs/notebooks/108-gpu-device-with-output.rst @@ -80,10 +80,10 @@ cards `__. To get started, first `install -OpenVINO `__ +OpenVINO `__ on a system equipped with one or more Intel GPUs. Follow the `GPU configuration -instructions `__ +instructions `__ to configure OpenVINO to work with your GPU. Then, read on to learn how to accelerate inference with GPUs in OpenVINO! @@ -150,12 +150,12 @@ the system has a CPU, an integrated and discrete GPU, we should expect to see a list like this: ``['CPU', 'GPU.0', 'GPU.1']``. To simplify its use, the “GPU.0” can also be addressed with just “GPU”. For more details, see the `Device Naming -Convention `__ +Convention `__ section. If the GPUs are installed correctly on the system and still do not appear in the list, follow the steps described -`here `__ +`here `__ to configure your GPU drivers to work with OpenVINO. Once we have the GPUs working with OpenVINO, we can proceed with the next sections. @@ -269,7 +269,7 @@ the key properties are: speed up compilation time. To learn more about devices and properties, see the `Query Device -Properties `__ +Properties `__ page. Compiling a Model on GPU `⇑ <#top>`__ @@ -279,7 +279,7 @@ Compiling a Model on GPU `⇑ <#top>`__ Now, we know how to list the GPUs in the system and check their properties. We can easily use one for compiling and running models with OpenVINO `GPU -plugin `__. +plugin `__. 
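The 108-gpu-device text above walks through listing devices and querying their properties before compiling a model on the GPU plugin. A short sketch of that discovery step; device names such as ``GPU.0`` and ``GPU.1`` appear only on systems with the corresponding hardware and drivers:

.. code:: python

   from openvino.runtime import Core

   core = Core()
   print(core.available_devices)  # e.g. ['CPU', 'GPU.0', 'GPU.1'] on a multi-GPU machine

   # Print a human-readable name for each detected device.
   for device in core.available_devices:
       print(device, core.get_property(device, "FULL_DEVICE_NAME"))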
Download and Convert a Model `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -357,7 +357,7 @@ Convert the Model to OpenVINO IR format `⇑ <#top>`__ To convert the model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved to the ``model/ir_model/`` directory. For more details about model conversion, see this -`page `__. +`page `__. .. code:: ipython3 @@ -418,7 +418,7 @@ the ``available_devices`` method are valid device specifiers. You may also use “AUTO”, which will automatically select the best device for inference (which is often the GPU). To learn more about AUTO plugin, visit the `Automatic Device -Selection `__ +Selection `__ page as well as the `AUTO device tutorial `__. @@ -489,7 +489,7 @@ compile times with caching enabled and disabled as follows: The actual time improvements will depend on the environment as well as the model being used but it is definitely something to consider when optimizing an application. To read more about this, see the `Model -Caching `__ +Caching `__ docs. Throughput and Latency Performance Hints `⇑ <#top>`__ @@ -529,12 +529,12 @@ Using Multiple GPUs with Multi-Device and Cumulative Throughput `⇑ <#top>`__ The latency and throughput hints mentioned above are great and can make a difference when used adequately but they usually use just one device, either due to the `AUTO -plugin `__ +plugin `__ or by manual specification of the device name as above. When we have multiple devices, such as an integrated and discrete GPU, we may use both at the same time to improve the utilization of the resources. In order to do this, OpenVINO provides a virtual device called -`MULTI `__, +`MULTI `__, which is just a combination of the existent devices that knows how to split inference work between them, leveraging the capabilities of each device. @@ -563,7 +563,7 @@ manually specify devices to use. Below is an example showing how to use how to set up an asynchronous pipeline that takes advantage of parallelism to increase throughput. To learn more, see `Asynchronous - Inferencing `__ + Inferencing `__ in OpenVINO as well as the `Asynchronous Inference notebook `__. @@ -589,7 +589,7 @@ Note that benchmark_app only requires the model path to run but both the device and hint arguments will be useful to us. For more advanced usages, the tool itself has other options that can be checked by running ``benchmark_app -h`` or reading the -`docs `__. +`docs `__. The following example shows how to benchmark a simple model, using a GPU with a latency focus: @@ -1363,18 +1363,18 @@ To read more about any of these topics, feel free to visit their corresponding documentation: - `GPU - Plugin `__ + Plugin `__ - `AUTO - Plugin `__ + Plugin `__ - `Model - Caching `__ + Caching `__ - `MULTI Device - Mode `__ + Mode `__ - `Query Device - Properties `__ + Properties `__ - `Configurations for GPUs with - OpenVINO `__ + OpenVINO `__ - `Benchmark Python - Tool `__ + Tool `__ - `Asynchronous - Inferencing `__ + Inferencing `__ diff --git a/docs/notebooks/109-latency-tricks-with-output.rst b/docs/notebooks/109-latency-tricks-with-output.rst index 5d2d14fa85d4c4..4ce64f56c5afa5 100644 --- a/docs/notebooks/109-latency-tricks-with-output.rst +++ b/docs/notebooks/109-latency-tricks-with-output.rst @@ -514,7 +514,7 @@ OpenVINO IR model + more inference threads `⇑ <#top>`__ There is a possibility to add a config for any device (CPU in this case). 
We will increase the number of threads to an equal number of our cores. It should help us a lot. There are `more -options `__ +options `__ to be changed, so it’s worth playing with them to see what works best in our case. @@ -546,7 +546,7 @@ OpenVINO IR model in latency mode `⇑ <#top>`__ OpenVINO offers a virtual device called -`AUTO `__, +`AUTO `__, which can select the best device for us based on a performance hint. There are three different hints: ``LATENCY``, ``THROUGHPUT``, and ``CUMULATIVE_THROUGHPUT``. As this notebook is focused on the latency @@ -665,6 +665,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above, e.g., shared memory. diff --git a/docs/notebooks/109-throughput-tricks-with-output.rst b/docs/notebooks/109-throughput-tricks-with-output.rst index c5e7a2c9646629..c9259e4b395f1b 100644 --- a/docs/notebooks/109-throughput-tricks-with-output.rst +++ b/docs/notebooks/109-throughput-tricks-with-output.rst @@ -500,7 +500,7 @@ configuration of the device. There are three different hints: notebook is focused on the throughput mode, we will use the latter two. The hints can be used with other devices as well. Throughput mode implicitly triggers using the `Automatic -Batching `__ +Batching `__ feature, which sets the batch size to the optimal level. .. code:: ipython3 @@ -556,7 +556,7 @@ OpenVINO IR model in throughput mode on AUTO `⇑ <#top>`__ OpenVINO offers a virtual device called -`AUTO `__, +`AUTO `__, which can select the best device for us based on the aforementioned performance hint. @@ -671,7 +671,7 @@ There are other tricks for performance improvement, such as advanced options, quantization and pre-post-processing or dedicated to latency mode. To get even more from your model, please visit `advanced throughput -options `__, +options `__, `109-latency-tricks <109-latency-tricks-with-output.html>`__, `111-detection-quantization <111-yolov5-quantization-migration-with-output.html>`__, and `118-optimize-preprocessing <118-optimize-preprocessing-with-output.html>`__. @@ -722,6 +722,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above. diff --git a/docs/notebooks/110-ct-scan-live-inference-with-output.rst b/docs/notebooks/110-ct-scan-live-inference-with-output.rst index 0f3e10cca74df6..9ae34d9db77f08 100644 --- a/docs/notebooks/110-ct-scan-live-inference-with-output.rst +++ b/docs/notebooks/110-ct-scan-live-inference-with-output.rst @@ -113,7 +113,7 @@ Benchmark Model Performance `⇑ <#top>`__ ############################################################################################################################### To measure the inference performance of the IR model, use -`Benchmark Tool `__ +`Benchmark Tool `__ - an inference performance measurement tool in OpenVINO. Benchmark tool is a command-line application that can be run in the notebook with ``! benchmark_app`` or ``%sx benchmark_app`` commands. @@ -297,7 +297,7 @@ model will be cached, so after the first time model loading will be faster. 
For more information on OpenVINO Runtime, including Model Caching, refer to the `OpenVINO API tutorial <002-openvino-api-with-output.html>`__. -We will use `AsyncInferQueue `__ +We will use `AsyncInferQueue `__ to perform asynchronous inference. It can be instantiated with compiled model and a number of jobs - parallel execution threads. If you don’t pass a number of jobs or pass ``0``, then OpenVINO will pick the optimal diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst index b7089acadd661b..898fa83b906f45 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst @@ -14,7 +14,7 @@ scratch; the data is from This third tutorial in the series shows how to: - Convert an Original model to OpenVINO IR with `model conversion - API `__ + API `__ - Quantize a PyTorch model with NNCF - Evaluate the F1 score metric of the original model and the quantized model @@ -577,7 +577,7 @@ Compare Performance of the FP32 IR Model and Quantized Models `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To measure the inference performance of the ``FP32`` and ``INT8`` -models, we use `Benchmark Tool `__ +models, we use `Benchmark Tool `__ - OpenVINO’s inference performance measurement tool. Benchmark tool is a command line application, part of OpenVINO development tools, that can be run in the notebook with ``! benchmark_app`` or diff --git a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst index 6181e22d00014e..b297f3425dac14 100644 --- a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst +++ b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst @@ -2,7 +2,7 @@ Migrate quantization from POT API to NNCF API ============================================= This tutorial demonstrates how to migrate quantization pipeline written -using the OpenVINO `Post-Training Optimization Tool (POT) `__ to +using the OpenVINO `Post-Training Optimization Tool (POT) `__ to `NNCF Post-Training Quantization API `__. This tutorial is based on `Ultralytics YOLOv5 `__ model and additionally it compares model accuracy between the FP32 precision and quantized INT8 @@ -160,7 +160,7 @@ following content: Convert the ONNX model to OpenVINO Intermediate Representation (IR) -model generated by `model conversion API `__. +model generated by `model conversion API `__. We will use the ``openvino.tools.mo.convert_model`` function of model conversion Python API to convert ONNX model to OpenVINO Model, then it can be serialized using ``openvino.runtime.serialize``. As the result, @@ -462,7 +462,7 @@ Quantization parameters ``preset``, ``model_type``, ``subset_size``, ``fast_bias_correction``, ``ignored_scope`` are arguments of function. More details about supported parameters and formats can be found in NNCF Post-Training Quantization -`documentation `__. +`documentation `__. NNCF also expect providing model object in inference framework format, in our case ``openvino.runtime.Model`` instance created using ``core.read_model`` or ``openvino.tools.mo.convert_model``. 
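The 111-yolov5 migration text above explains that ``nncf.quantize`` expects an ``openvino.runtime.Model`` plus a calibration dataset wrapped in ``nncf.Dataset``. A minimal sketch under the assumption that an IR file already exists; the IR path, input shape, and stand-in calibration data below are placeholders, not taken from the patch:

.. code:: python

   import numpy as np
   import nncf
   from openvino.runtime import Core, serialize

   core = Core()
   ov_model = core.read_model("model.xml")  # placeholder IR path

   # Stand-in calibration data; a real pipeline would iterate validation samples.
   dataloader = [(np.random.rand(1, 3, 640, 640).astype(np.float32), 0) for _ in range(10)]

   def transform_fn(data_item):
       # Return only the model input from an (image, label) pair.
       image, _ = data_item
       return image

   calibration_dataset = nncf.Dataset(dataloader, transform_fn)
   quantized_model = nncf.quantize(ov_model, calibration_dataset, subset_size=300)
   serialize(quantized_model, "model_int8.xml")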
@@ -1193,8 +1193,8 @@ References `⇑ <#top>`__ - `Ultralytics YOLOv5 `__ - `OpenVINO Post-training Optimization - Tool `__ + Tool `__ - `NNCF Post-training quantization `__ - `Model Conversion - API `__ + API `__ diff --git a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst index 69d0e04db139f0..4b054d59389fe6 100644 --- a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst +++ b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst @@ -474,7 +474,7 @@ framework is designed so that modifications to your original training code are minor. Quantization is the simplest scenario and requires a few modifications. For more information about NNCF Post Training Quantization (PTQ) API, refer to the `Basic Quantization Flow -Guide `__. +Guide `__. 1. Create a transformation function that accepts a sample from the dataset and returns data suitable for model inference. This enables @@ -555,7 +555,7 @@ Python API . The models will be saved to the ‘OUTPUT’ directory for later benchmarking. For more information about model conversion, refer to this -`page `__. +`page `__. Before converting models, export them to ONNX. Executing the following command may take a while. @@ -668,7 +668,7 @@ IV. Compare performance of INT8 model and FP32 model in OpenVINO `⇑ <#top>`__ Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/113-image-classification-quantization-with-output.rst b/docs/notebooks/113-image-classification-quantization-with-output.rst index 15e6e52b6f598a..95f2f7695c1078 100644 --- a/docs/notebooks/113-image-classification-quantization-with-output.rst +++ b/docs/notebooks/113-image-classification-quantization-with-output.rst @@ -100,7 +100,7 @@ static shape. The converted model is ready to be loaded on a device for inference and can be saved on a disk for next usage via the ``serialize`` function. More details about model conversion Python API can be found on this -`page `__. +`page `__. .. code:: ipython3 @@ -206,7 +206,7 @@ dataset for performing basic quantization. Optionally, additional parameters like ``subset_size``, ``preset``, ``ignored_scope`` can be provided to improve quantization result if applicable. More details about supported parameters can be found on this -`page `__ +`page `__ .. code:: ipython3 @@ -323,7 +323,7 @@ Compare Performance of the Original and Quantized Models `⇑ <#top>`__ Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. .. note:: diff --git a/docs/notebooks/115-async-api-with-output.rst b/docs/notebooks/115-async-api-with-output.rst index bec3bc9e219d8d..06a43e3aeb2550 100644 --- a/docs/notebooks/115-async-api-with-output.rst +++ b/docs/notebooks/115-async-api-with-output.rst @@ -467,7 +467,7 @@ Compare the performance `⇑ <#top>`__ Asynchronous mode pipelines can be supported with the -`AsyncInferQueue `__ +`AsyncInferQueue `__ wrapper class. This class automatically spawns the pool of ``InferRequest`` objects (also called “jobs”) and provides synchronization mechanisms to control the flow of the pipeline. 
It is a diff --git a/docs/notebooks/116-sparsity-optimization-with-output.rst b/docs/notebooks/116-sparsity-optimization-with-output.rst index 532094888deafe..c5ccc6437e9a41 100644 --- a/docs/notebooks/116-sparsity-optimization-with-output.rst +++ b/docs/notebooks/116-sparsity-optimization-with-output.rst @@ -12,7 +12,7 @@ which has been quantized, sparsified, and tuned for `SST2 datasets `__. It demonstrates the inference performance advantage on 4th Gen Intel® Xeon® Scalable Processors by running it with `Sparse Weight -Decompression `__, +Decompression `__, a runtime option that seizes model sparsity for efficiency. The notebook consists of the following steps: @@ -369,5 +369,5 @@ small sequence length, for example, 32 and lower. For more details about asynchronous inference with OpenVINO, refer to the following documentation: -- `Deployment Optimization Guide `__ -- `Inference Request API `__ +- `Deployment Optimization Guide `__ +- `Inference Request API `__ diff --git a/docs/notebooks/117-model-server-with-output.rst b/docs/notebooks/117-model-server-with-output.rst index 7cf130e876b78c..272e3b3bdca60d 100644 --- a/docs/notebooks/117-model-server-with-output.rst +++ b/docs/notebooks/117-model-server-with-output.rst @@ -225,7 +225,7 @@ Check whether the OVMS container is running normally: The required Model Server parameters are listed below. For additional configuration options, see the -`Model Server Parameters section `__. +`Model Server Parameters section `__. .. raw:: html @@ -884,6 +884,6 @@ References `⇑ <#top>`__ 1. `OpenVINO™ Model Server - documentation `__ + documentation `__ 2. `OpenVINO™ Model Server GitHub repository `__ diff --git a/docs/notebooks/118-optimize-preprocessing-with-output.rst b/docs/notebooks/118-optimize-preprocessing-with-output.rst index e9f19e107c96a3..cebce914098bd5 100644 --- a/docs/notebooks/118-optimize-preprocessing-with-output.rst +++ b/docs/notebooks/118-optimize-preprocessing-with-output.rst @@ -11,9 +11,9 @@ instrument, that enables integration of preprocessing steps into an execution graph and performing it on a selected device, which can improve device utilization. For more information about Preprocessing API, see this -`overview `__ +`overview `__ and -`details `__ +`details `__ This tutorial include following steps: @@ -217,7 +217,7 @@ Convert model to OpenVINO IR and setup preprocessing steps with model conversion To convert a TensorFlow model to OpenVINO IR, use the ``mo.convert_model`` python function of `model conversion -API `__. +API `__. The function returns instance of OpenVINO Model class, which is ready to use in Python interface but can also be serialized to OpenVINO IR format for future execution using ``openvino.runtime.serialize``. The models @@ -240,7 +240,7 @@ Setup the following conversions: Also converting of layout could be specified with ``layout`` option. More information and parameters described in the `Embedding Preprocessing Computation -article `__. +article `__. .. code:: ipython3 @@ -326,7 +326,7 @@ Graph modifications of a model shall be performed after the model is read from a drive and before it is loaded on the actual device. Pre-processing support following operations (please, see more details -`here `__) +`here `__) - Mean/Scale Normalization - Converting Precision @@ -360,7 +360,7 @@ Create ``PrePostProcessor`` Object `⇑ <#top>`__ The -`PrePostProcessor() `__ +`PrePostProcessor() `__ class enables specifying the preprocessing and postprocessing steps for a model. 
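A condensed sketch of how such a ``PrePostProcessor`` object is typically built follows; the IR path and the normalization constants are placeholders (common ImageNet statistics), not values taken from this notebook:

.. code:: ipython3

    from openvino.preprocess import PrePostProcessor, ResizeAlgorithm
    from openvino.runtime import Core, Layout, Type

    core = Core()
    model = core.read_model("model/ir_model.xml")  # placeholder path

    ppp = PrePostProcessor(model)
    # Describe the tensor the application will actually provide: u8 NHWC images
    # of unknown spatial size, so resize knows the source is dynamic
    ppp.input().tensor() \
        .set_element_type(Type.u8) \
        .set_spatial_dynamic_shape() \
        .set_layout(Layout("NHWC"))
    # Declare the layout the model itself expects
    ppp.input().model().set_layout(Layout("NCHW"))
    # Preprocessing steps that will run on the selected device
    ppp.input().preprocess() \
        .convert_element_type(Type.f32) \
        .resize(ResizeAlgorithm.RESIZE_LINEAR) \
        .mean([123.675, 116.28, 103.53]) \
        .scale([58.395, 57.12, 57.375])
    model_with_preprocessing = ppp.build()

The returned model can then be compiled as usual, and the resize, mean and scale work is performed on the target device instead of in NumPy.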
@@ -384,7 +384,7 @@ about user’s input tensor will be initialized to same data (type/shape/etc) as model’s input parameter. User application can override particular parameters according to application’s data. Refer to the following -`page `__ +`page `__ for more information about parameters for overriding. Below is all the specified input information: @@ -423,7 +423,7 @@ Declaring Model Layout `⇑ <#top>`__ Model input already has information about precision and shape. Preprocessing API is not intended to modify this. The only thing that may be specified is input data -`layout `__. +`layout `__. .. code:: ipython3 @@ -452,7 +452,7 @@ Preprocessing Steps `⇑ <#top>`__ Now, the sequence of preprocessing steps can be defined. For more information about preprocessing steps, see -`here `__. +`here `__. Perform the following: @@ -461,7 +461,7 @@ Perform the following: dynamic size, for example, ``{?, 3, ?, ?}`` resize will not know how to resize the picture. Therefore, in this case, target height/ width should be specified. For more details, see also the - `PreProcessSteps.resize() `__. + `PreProcessSteps.resize() `__. - Subtract mean from each channel. - Divide each pixel data to appropriate scale value. diff --git a/docs/notebooks/119-tflite-to-openvino-with-output.rst b/docs/notebooks/119-tflite-to-openvino-with-output.rst index 6bf4b8924cc08e..07d330269f823d 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output.rst +++ b/docs/notebooks/119-tflite-to-openvino-with-output.rst @@ -10,11 +10,11 @@ machine learning models to edge devices. This short tutorial shows how to convert a TensorFlow Lite `EfficientNet-Lite-B0 `__ image classification model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using `Model -Optimizer `__. +Optimizer `__. After creating the OpenVINO IR, load the model in `OpenVINO -Runtime `__ +Runtime `__ and do inference with a sample image. .. _top: @@ -111,9 +111,9 @@ using ``serialize`` function, reducing loading time for next running. Optionally, we can apply compression to the FP16 model weights, using the ``compress_to_fp16=True`` option and integrate preprocessing using this approach. For more information about model conversion, see this -`page `__. +`page `__. For TensorFlow Lite models support, refer to this -`tutorial `__. +`tutorial `__. .. code:: ipython3 @@ -222,7 +222,7 @@ Select device from dropdown list for running inference using OpenVINO: Estimate Model Performance `⇑ <#top>`__ ############################################################################################################################### -`Benchmark Tool `__ +`Benchmark Tool `__ is used to measure the inference performance of the model on CPU and GPU. diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst index 9e2ee53134913b..a66dcefb6a4884 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst @@ -19,9 +19,9 @@ This tutorial shows how to convert a TensorFlow `Faster R-CNN with Resnet-50 V1 `__ object detection model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using `Model -Optimizer `__. +Optimizer `__. After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ and do inference with a sample image. 
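As a minimal sketch of that convert-then-infer flow (the SavedModel path, input shape and dtype are placeholders, not the notebook's exact values):

.. code:: ipython3

    import numpy as np
    from openvino.tools import mo
    from openvino.runtime import Core, serialize

    # Convert the downloaded TensorFlow SavedModel to OpenVINO IR
    ov_model = mo.convert_model(saved_model_dir="model/faster_rcnn_resnet50_v1_640x640")
    serialize(ov_model, "model/faster_rcnn.xml")

    # Load the converted model and run inference on a dummy image
    core = Core()
    compiled_model = core.compile_model(ov_model, device_name="CPU")
    output_layer = compiled_model.output(0)

    dummy_image = np.zeros((1, 640, 640, 3), dtype=np.uint8)  # dtype/shape depend on the model
    result = compiled_model([dummy_image])[output_layer]

The saved IR can afterwards be passed to the Benchmark Tool, for example ``benchmark_app -m model/faster_rcnn.xml -d CPU -t 15``, to measure raw inference performance.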
@@ -182,9 +182,9 @@ or saved on disk using the ``serialize`` function to reduce loading time when the model is run in the future. See the `Model Optimizer Developer -Guide `__ +Guide `__ for more information about Model Optimizer and TensorFlow `models -support `__. +support `__. .. code:: ipython3 @@ -694,5 +694,4 @@ utilization. For more information, refer to the `Optimize Preprocessing tutorial <118-optimize-preprocessing-with-output.html>`__ -and to the overview of `Preprocessing -API `__. +and to the overview of :doc:`Preprocessing API ` . diff --git a/docs/notebooks/121-convert-to-openvino-with-output.rst b/docs/notebooks/121-convert-to-openvino-with-output.rst index cf93b94ac741a3..13bca81bd9e271 100644 --- a/docs/notebooks/121-convert-to-openvino-with-output.rst +++ b/docs/notebooks/121-convert-to-openvino-with-output.rst @@ -50,7 +50,7 @@ OpenVINO IR format ------------------ OpenVINO `Intermediate Representation -(IR) `__ is the +(IR) `__ is the proprietary model format of OpenVINO. It is produced after converting a model with model conversion API. Model conversion API translates the frequently used deep learning operations to their respective similar @@ -68,7 +68,7 @@ tool. You can choose one of them based on whichever is most convenient for you. There should not be any differences in the results of model conversion if the same set of parameters is used. For more details, refer to `Model -Preparation `__ +Preparation `__ documentation. .. code:: ipython3 @@ -956,7 +956,7 @@ To convert a model to OpenVINO IR, use the following command: - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.bin @@ -991,20 +991,20 @@ Both Python conversion API and Model Optimizer command-line tool provide the following capabilities: \* overriding original input shapes for model conversion with ``input`` and ``input_shape`` parameters. `Setting Input Shapes -guide `__. +guide `__. \* cutting off unwanted parts of a model (such as unsupported operations and training sub-graphs) using the ``input`` and ``output`` parameters to define new inputs and outputs of the converted model. `Cutting Off Parts of a Model -guide `__. +guide `__. \* inserting additional input pre-processing sub-graphs into the converted model by using the ``mean_values``, ``scales_values``, ``layout``, and other parameters. `Embedding Preprocessing Computation -article `__. +article `__. \* compressing the model weights (for example, weights for convolutions and matrix multiplications) to FP16 data type using ``compress_to_fp16`` compression parameter. 
`Compression of a Model to FP16 -guide `__. +guide `__. If the out-of-the-box conversion (only the ``input_model`` parameter is specified) is not successful, it may be required to use the parameters @@ -1023,7 +1023,7 @@ up static shapes, model conversion API provides the ``input`` and ``input_shape`` parameters. For more information refer to `Setting Input Shapes -guide `__. +guide `__. .. code:: ipython3 @@ -1042,7 +1042,7 @@ guide `__. +guide `__. .. code:: ipython3 @@ -1181,7 +1181,7 @@ guide `__. +article `__. Specifying Layout ^^^^^^^^^^^^^^^^^ @@ -1232,7 +1232,7 @@ for both inputs and outputs. Some preprocessing requires to set input layouts, for example, setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB). For the layout syntax, check the `Layout API -overview `__. +overview `__. To specify the layout, you can use the layout option followed by the layout value. @@ -1253,7 +1253,7 @@ Resnet50 model that was exported to the ONNX format: - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1291,7 +1291,7 @@ presented by input data. Use either ``layout`` or ``source_layout`` with - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1300,7 +1300,7 @@ presented by input data. Use either ``layout`` or ``source_layout`` with - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. 
While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1342,7 +1342,7 @@ that the preprocessing takes negligible time for inference. - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1351,7 +1351,7 @@ that the preprocessing takes negligible time for inference. - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1390,7 +1390,7 @@ the color channels before inference. - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. 
- Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1427,9 +1427,9 @@ models, this decrease is negligible. - Avoid using `tokenizers` before the fork if possible - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression by removing argument --compress_to_fp16 or set it to false --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin diff --git a/docs/notebooks/201-vision-monodepth-with-output.rst b/docs/notebooks/201-vision-monodepth-with-output.rst index e98e4c37d8fdac..b26e82ff4e7ff0 100644 --- a/docs/notebooks/201-vision-monodepth-with-output.rst +++ b/docs/notebooks/201-vision-monodepth-with-output.rst @@ -5,7 +5,7 @@ Monodepth Estimation with OpenVINO This tutorial demonstrates Monocular Depth Estimation with MidasNet in OpenVINO. Model information can be found -`here `__. +`here `__. .. figure:: https://user-images.githubusercontent.com/36741649/127173017-a0bbcf75-db24-4d2c-81b9-616e04ab7cd9.gif :alt: monodepth diff --git a/docs/notebooks/202-vision-superresolution-image-with-output.rst b/docs/notebooks/202-vision-superresolution-image-with-output.rst index 2a9c26e53422db..6dd4a440732ba4 100644 --- a/docs/notebooks/202-vision-superresolution-image-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-image-with-output.rst @@ -7,7 +7,7 @@ Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook shows the Single Image Super Resolution (SISR) which takes just one low resolution image. A model called -`single-image-super-resolution-1032 `__, +`single-image-super-resolution-1032 `__, which is available in Open Model Zoo, is used in this tutorial. It is based on the research paper cited below. 
diff --git a/docs/notebooks/202-vision-superresolution-video-with-output.rst b/docs/notebooks/202-vision-superresolution-video-with-output.rst index 7b48a8c64eddaa..52152cad081c55 100644 --- a/docs/notebooks/202-vision-superresolution-video-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-video-with-output.rst @@ -7,7 +7,7 @@ Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames in a 360p (480×360) video in 360p resolution. A model called -`single-image-super-resolution-1032 `__, +`single-image-super-resolution-1032 `__, which is available in Open Model Zoo, is used in this tutorial. It is based on the research paper cited below. diff --git a/docs/notebooks/203-meter-reader-with-output.rst b/docs/notebooks/203-meter-reader-with-output.rst index eeec4746977f14..e426fec36bd693 100644 --- a/docs/notebooks/203-meter-reader-with-output.rst +++ b/docs/notebooks/203-meter-reader-with-output.rst @@ -571,7 +571,7 @@ Select device from dropdown list for running inference using OpenVINO: The number of detected meter from detection network can be arbitrary in some scenarios, which means the batch size of segmentation network input is a `dynamic -dimension `__, +dimension `__, and it should be specified as ``-1`` or the ``ov::Dimension()`` instead of a positive number used for static dimensions. In this case, for memory consumption optimization, we can specify the lower and/or upper diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst index 29f412a4194065..750508af69864b 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst @@ -450,7 +450,7 @@ While ONNX models are directly supported by OpenVINO runtime, it can be useful to convert them to IR format to take advantage of OpenVINO optimization tools and features. The ``mo.convert_model`` function of `model conversion -API `__ +API `__ can be used. The function returns instance of OpenVINO Model class, which is ready to use in Python interface but can also be serialized to OpenVINO IR format for future execution. @@ -603,7 +603,7 @@ Benchmarking performance of converted model `⇑ <#top>`__ Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the model. Note that for more accurate performance, it is recommended to run diff --git a/docs/notebooks/205-vision-background-removal-with-output.rst b/docs/notebooks/205-vision-background-removal-with-output.rst index 1c4ae2d1696ec4..976361459d5696 100644 --- a/docs/notebooks/205-vision-background-removal-with-output.rst +++ b/docs/notebooks/205-vision-background-removal-with-output.rst @@ -223,7 +223,7 @@ repository `__ and multiplied by 255 to support images with pixel values from 0-255. For more information about model conversion, refer to this -`page `__. +`page `__. Executing the following command may take a while. 
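A sketch of the kind of conversion call meant here is given below; the ONNX path and the normalization constants are examples only (standard ImageNet statistics), not the notebook's exact command:

.. code:: ipython3

    from openvino.tools import mo
    from openvino.runtime import serialize

    # Bake 0-255 input normalization into the IR at conversion time
    ov_model = mo.convert_model(
        "model/u2net.onnx",
        mean_values=[123.675, 116.28, 103.53],
        scale_values=[58.395, 57.12, 57.375],
        compress_to_fp16=True,
    )
    serialize(ov_model, "model/u2net.xml")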
@@ -422,7 +422,7 @@ References `⇑ <#top>`__ - `PIP install openvino-dev `__ - `Model Conversion - API `__ + API `__ - `U^2-Net `__ - U^2-Net research paper: `U^2-Net: Going Deeper with Nested U-Structure for Salient Object diff --git a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst index 32cafa0c20c30a..0dc6a88ad4c929 100644 --- a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst +++ b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst @@ -359,9 +359,9 @@ inputs are known, you can use model conversion API and convert the model to OpenVINO IR with these values. Use ``FP16`` precision and set log level to ``CRITICAL`` to ignore warnings that are irrelevant for this demo. For information about setting the parameters, see this -`page `__. +`page `__. -**Convert ONNX Model to OpenVINO IR with** `Model Conversion Python API `__ +**Convert ONNX Model to OpenVINO IR with** `Model Conversion Python API `__ .. code:: ipython3 @@ -596,7 +596,7 @@ References `⇑ <#top>`__ - `PaddleGAN `__ - `Paddle2ONNX `__ - `OpenVINO ONNX support `__ -- `Model Conversion API `__ +- `Model Conversion API `__ The PaddleGAN code that is shown in this notebook is written by PaddlePaddle Authors and licensed under the Apache 2.0 license. The diff --git a/docs/notebooks/208-optical-character-recognition-with-output.rst b/docs/notebooks/208-optical-character-recognition-with-output.rst index 871f7110dd1ef9..30524055a60e84 100644 --- a/docs/notebooks/208-optical-character-recognition-with-output.rst +++ b/docs/notebooks/208-optical-character-recognition-with-output.rst @@ -9,9 +9,9 @@ This tutorial demonstrates how to perform optical character recognition tutorial, which shows only text detection. The -`horizontal-text-detection-0001 `__ +`horizontal-text-detection-0001 `__ and -`text-recognition-resnet `__ +`text-recognition-resnet `__ models are used together for text detection and then text recognition. In this tutorial, Open Model Zoo tools including Model Downloader, Model @@ -343,9 +343,9 @@ Converting text-recognition-resnet-fc… Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=/tmp/tmppkwl27u7 --model_name=text-recognition-resnet-fc --input=input '--mean_values=input[127.5]' '--scale_values=input[127.5]' --output=output --input_model=model/public/text-recognition-resnet-fc/resnet_fc.onnx '--layout=input(NCHW)' '--input_shape=[1, 1, 32, 100]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression by removing argument --compress_to_fp16 or set it to false --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. 
- Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /tmp/tmppkwl27u7/text-recognition-resnet-fc.xml [ SUCCESS ] BIN file: /tmp/tmppkwl27u7/text-recognition-resnet-fc.bin diff --git a/docs/notebooks/209-handwritten-ocr-with-output.rst b/docs/notebooks/209-handwritten-ocr-with-output.rst index 8aa26383d21a6c..454802bf631b0e 100644 --- a/docs/notebooks/209-handwritten-ocr-with-output.rst +++ b/docs/notebooks/209-handwritten-ocr-with-output.rst @@ -10,9 +10,9 @@ Latin alphabet is available in `notebook This model is capable of processing only one line of symbols at a time. The models used in this notebook are -`handwritten-japanese-recognition-0001 `__ +`handwritten-japanese-recognition-0001 `__ and -`handwritten-simplified-chinese-0001 `__. +`handwritten-simplified-chinese-0001 `__. To decode model outputs as readable text `kondate_nakayosi `__ and diff --git a/docs/notebooks/215-image-inpainting-with-output.rst b/docs/notebooks/215-image-inpainting-with-output.rst index f9ecfbafeeb006..c431ee55da9359 100644 --- a/docs/notebooks/215-image-inpainting-with-output.rst +++ b/docs/notebooks/215-image-inpainting-with-output.rst @@ -50,7 +50,7 @@ Download ``gmcnn-places2-tf``\ model (this step will be skipped if the model is unzip it. Downloaded model stored in TensorFlow frozen graph format. The steps how this frozen graph can be obtained from original model checkpoint can be found in this -`instruction `__ +`instruction `__ .. code:: ipython3 @@ -82,7 +82,7 @@ Convert Tensorflow model to OpenVINO IR format `⇑ <#top>`__ The pre-trained model is in TensorFlow format. To use it with OpenVINO, convert it to OpenVINO IR format with model conversion API. For more information about model conversion, see this -`page `__. +`page `__. This step is also skipped if the model is already converted. .. code:: ipython3 diff --git a/docs/notebooks/216-attention-center-with-output.rst b/docs/notebooks/216-attention-center-with-output.rst index 2a5dcfc7c8aba2..0e50d17ec85e40 100644 --- a/docs/notebooks/216-attention-center-with-output.rst +++ b/docs/notebooks/216-attention-center-with-output.rst @@ -124,7 +124,7 @@ format. In this Notebook the model will be converted to OpenVINO IR format with Model Optimizer. This step will be skipped if the model have already been converted. For more information about Model Optimizer, please, see the `Model Optimizer Developer -Guide `__. +Guide `__. Also TFLite models format is supported in OpenVINO by TFLite frontend, so the model can be passed directly to ``core.read_model()``. You can diff --git a/docs/notebooks/217-vision-deblur-with-output.rst b/docs/notebooks/217-vision-deblur-with-output.rst index 6e0f7067823a60..1241fab1900fa3 100644 --- a/docs/notebooks/217-vision-deblur-with-output.rst +++ b/docs/notebooks/217-vision-deblur-with-output.rst @@ -30,7 +30,7 @@ DeblurGAN-v2 in OpenVINO, by first converting the `VITA-Group/DeblurGANv2 `__ model to OpenVINO Intermediate Representation (OpenVINO IR) format. For more information about the model, see the -`documentation `__. +`documentation `__. What is deblurring? 
`⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -219,7 +219,7 @@ an OpenVINO model ready to load on a device and start making predictions. We can save it on a disk for next usage with ``openvino.runtime.serialize``. For more information about model conversion Python API, see this -`page `__. +`page `__. Model conversion may take a while. diff --git a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst index 07fd9413bcaf79..67c8776cff2d4a 100644 --- a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst +++ b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst @@ -371,7 +371,7 @@ To evaluate performance with OpenVINO, we can either convert the trained PyTorch model to an intermediate representation (IR) format or to an ONNX representation. This notebook uses the ONNX format. For more details on model optimization, refer to: -https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html +https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html .. code:: ipython3 @@ -486,7 +486,7 @@ The OpenVINO toolkit provides a benchmarking application to gauge the platform specific runtime performance that can be obtained under optimal configuration parameters for a given model. For more details refer to: -https://docs.openvino.ai/2023.0/openvino_inference_engine_tools_benchmark_tool_README.html +https://docs.openvino.ai/2023.1/openvino_inference_engine_tools_benchmark_tool_README.html Here, we use the benchmark application to obtain performance estimates under optimal configuration for the knowledge graph model inference. We @@ -533,7 +533,7 @@ perform a sample evaluation on the knowledge graph. Then, we determine the platform specific speedup in runtime performance that can be obtained through OpenVINO graph optimizations. To learn more about the OpenVINO performance optimizations, refer to: -https://docs.openvino.ai/2023.0/openvino_docs_optimization_guide_dldt_optimization_guide.html +https://docs.openvino.ai/2023.1/openvino_docs_deployment_optimization_guide_dldt_optimization_guide.html References `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst index 88d0874160f97a..b007cc024a3b89 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst @@ -484,7 +484,7 @@ Optimize the Model with OpenVINO `⇑ <#top>`__ The LaBSE model is quite large and can be slow to infer on some hardware, so let’s optimize it with OpenVINO. `Model conversion Python -API `__ +API `__ accepts the PyTorch/Transformers model object and additional information about model inputs. An ``example_input`` is needed to trace the model execution graph, as PyTorch constructs it dynamically during inference. @@ -878,7 +878,7 @@ the pipeline - getting embeddings. You might wonder why, when using OpenVINO, you need to compile the model after reading it. There are two main reasons for this: 1. Compatibility with different devices. The model can be compiled to run on a `specific -device `__, +device `__, like CPU, GPU or GNA. 
Each device may work with different data types, support different features, and gain performance by changing the neural network for a specific computing model. With OpenVINO, you do not need @@ -887,13 +887,13 @@ hardware. A universal OpenVINO model representation is enough. 1. Optimization for different scenarios. For example, one scenario prioritizes minimizing the *time between starting and finishing model inference* (`latency-oriented -optimization `__). +optimization `__). In our case, it is more important *how many texts per second the model can process* (`throughput-oriented -optimization `__). +optimization `__). To get a throughput-optimized model, pass a `performance -hint `__ +hint `__ as a configuration during compilation. Then OpenVINO selects the optimal parameters for execution on the available hardware. @@ -912,7 +912,7 @@ parameters for execution on the available hardware. To further optimize hardware utilization, let’s change the inference mode from synchronous (Sync) to asynchronous (Async). While the synchronous API may be easier to start with, it is -`recommended `__ +`recommended `__ to use the asynchronous (callbacks-based) API in production code. It is the most general and scalable way to implement flow control for any number of requests. @@ -960,7 +960,7 @@ Let’s compare the models and plot the results. .. note:: To get a more accurate benchmark, use the `Benchmark Python - Tool `__ + Tool `__ .. code:: ipython3 @@ -1076,8 +1076,8 @@ boost. Here are useful links with information about the techniques used in this notebook: - `OpenVINO performance -hints `__ +hints `__ - `OpenVINO Async -API `__ +API `__ - `Throughput -Optimizations `__ +Optimizations `__ diff --git a/docs/notebooks/222-vision-image-colorization-with-output.rst b/docs/notebooks/222-vision-image-colorization-with-output.rst index 5d3d32c0655969..8d11c9030fc30e 100644 --- a/docs/notebooks/222-vision-image-colorization-with-output.rst +++ b/docs/notebooks/222-vision-image-colorization-with-output.rst @@ -213,9 +213,9 @@ respectively Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=/tmp/tmp7wsuasz7 --model_name=colorization-v2 --input=data_l --output=color_ab --input_model=models/public/colorization-v2/colorization-v2-eccv16.onnx '--layout=data_l(NCHW)' '--input_shape=[1, 1, 256, 256]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression by removing argument --compress_to_fp16 or set it to false --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. 
- Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /tmp/tmp7wsuasz7/colorization-v2.xml [ SUCCESS ] BIN file: /tmp/tmp7wsuasz7/colorization-v2.bin diff --git a/docs/notebooks/223-text-prediction-with-output.rst b/docs/notebooks/223-text-prediction-with-output.rst index eeb9f79f0f2097..97a7a1d8d8542e 100644 --- a/docs/notebooks/223-text-prediction-with-output.rst +++ b/docs/notebooks/223-text-prediction-with-output.rst @@ -192,7 +192,7 @@ While ONNX models are directly supported by OpenVINO runtime, it can be useful to convert them to IR format to take advantage of OpenVINO optimization tools and features. The ``mo.convert_model`` Python function of `model conversion -API `__ +API `__ can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface but can also be serialized to OpenVINO IR format for future execution using diff --git a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst index 8934a54ec2e574..8dcb6fa3b7f11d 100644 --- a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst +++ b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst @@ -79,7 +79,7 @@ function returns an OpenVINO model ready to load on a device and start making predictions. We can save it on a disk for next usage with ``openvino.runtime.serialize``. For more information about model conversion Python API, see this -`page `__. +`page `__. .. code:: ipython3 diff --git a/docs/notebooks/226-yolov7-optimization-with-output.rst b/docs/notebooks/226-yolov7-optimization-with-output.rst index 5867c26429a728..e87f4de95642ce 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output.rst +++ b/docs/notebooks/226-yolov7-optimization-with-output.rst @@ -911,7 +911,7 @@ Compare Performance of the Original and Quantized Models `⇑ <#top>`__ ############################################################################################################################### Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst index 018993b6f036f9..4095cec55bf40f 100644 --- a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst +++ b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst @@ -77,7 +77,7 @@ understand the context of a sentence. Here, we will use Convert Model to OpenVINO Intermediate Representation format. `⇑ <#top>`__ ############################################################################################################################### -`Model conversion API `__ +`Model conversion API `__ facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices. @@ -97,14 +97,14 @@ optimal execution on end-point target devices. 
mask, torch.tensor(torch.finfo(scores.dtype).min) -OpenVINO™ Runtime uses the `Infer Request `__ +OpenVINO™ Runtime uses the `Infer Request `__ mechanism which enables running models on different devices in asynchronous or synchronous manners. The model graph is sent as an argument to the OpenVINO API and an inference request is created. The default inference mode is AUTO but it can be changed according to requirements and hardware available. You can explore the different inference modes and their usage `in -documentation. `__ +documentation. `__ .. code:: ipython3 diff --git a/docs/notebooks/230-yolov8-optimization-with-output.rst b/docs/notebooks/230-yolov8-optimization-with-output.rst index f3083e063aa1a2..e8a9f31bcd6d0b 100644 --- a/docs/notebooks/230-yolov8-optimization-with-output.rst +++ b/docs/notebooks/230-yolov8-optimization-with-output.rst @@ -1199,7 +1199,7 @@ Compare Performance of the Original and Quantized Models `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. @@ -1680,8 +1680,7 @@ on a selected device (CPU/GPU etc.) rather than always being executed on CPU as part of an application. This will improve selected device utilization. -For more information, refer to the overview of `Preprocessing -API `__. +For more information, refer to the overview of :doc:`Preprocessing API ` . For example, we can integrate converting input data layout and normalization defined in ``image_to_tensor`` function. diff --git a/docs/notebooks/232-clip-language-saliency-map-with-output.rst b/docs/notebooks/232-clip-language-saliency-map-with-output.rst index e211581e457506..c7bd680d28289d 100644 --- a/docs/notebooks/232-clip-language-saliency-map-with-output.rst +++ b/docs/notebooks/232-clip-language-saliency-map-with-output.rst @@ -410,7 +410,7 @@ text encoder. You can split the CLIP into two models and call them separately. To convert the model to IR, you can use `Model Optimizer -(MO) `__. +(MO) `__. When you convert a model to the OpenVINO format, Model Optimizer enables specifying the inputs and outputs you want to use. During the conversion, it will trim the remaining parts of the model. Therefore, diff --git a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst index 3ab1065358ffbc..471e72ca3d0aea 100644 --- a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst +++ b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst @@ -329,7 +329,7 @@ example, input and output names or dynamic shapes). While ONNX models are directly supported by OpenVINO™ runtime, it can be useful to convert them to IR format to take the advantage of advanced OpenVINO optimization tools and features. We will use `model conversion -API `__ +API `__ to convert a model to IR format and compression weights to ``FP16`` format. 
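A short sketch of the two ways the returned model object can be used, assuming a generic placeholder ONNX file rather than one of the pipeline's actual exports:

.. code:: ipython3

    from openvino.tools import mo
    from openvino.runtime import Core, serialize

    # Convert an ONNX model to an in-memory OpenVINO Model with FP16 weights
    ov_model = mo.convert_model("model.onnx", compress_to_fp16=True)

    # Option 1: save the IR to disk for later loading
    serialize(ov_model, "model.xml")

    # Option 2: compile the in-memory model right away
    compiled_model = Core().compile_model(ov_model, device_name="CPU")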
diff --git a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst index 4a1e447144f312..75656ed47aa094 100644 --- a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst @@ -470,7 +470,7 @@ generated latents channels + 4 for latent representation of masked image Text Encoder successfully converted to ONNX [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/text_encoder.xml [ SUCCESS ] BIN file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/text_encoder.bin @@ -515,7 +515,7 @@ generated latents channels + 4 for latent representation of masked image U-Net successfully converted to ONNX [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/unet.xml [ SUCCESS ] BIN file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/unet.bin @@ -561,7 +561,7 @@ generated latents channels + 4 for latent representation of masked image VAE encoder successfully converted to ONNX [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/vae_encoder.xml [ SUCCESS ] BIN file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/vae_encoder.bin @@ -582,7 +582,7 @@ generated latents channels + 4 for latent representation of masked image VAE decoder successfully converted to ONNX [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. 
While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/vae_decoder.xml [ SUCCESS ] BIN file: /home/ea/work/openvino_notebooks/notebooks/236-stable-diffusion-v2/sd2_inpainting/vae_decoder.bin diff --git a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst index f44eda207c3306..bfa6ef6dce9ef0 100644 --- a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-with-output.rst @@ -55,7 +55,7 @@ in this notebook is `helenai/stabilityai-stable-diffusion-2-1-base-ov `__. Let’s download the pre-converted model Stable Diffusion 2.1 `Intermediate Representation Format -(IR) `__ +(IR) `__ Showing Info Available Devices `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst index f8cb417e3cf49c..33a4df82bfdbfc 100644 --- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst @@ -185,7 +185,7 @@ example, input and output names or dynamic shapes). While ONNX models are directly supported by OpenVINO™ runtime, it can be useful to convert them to IR format to take the advantage of advanced OpenVINO optimization tools and features. We will use OpenVINO `Model -Optimizer `__ +Optimizer `__ to convert a model to IR format. The pipeline consists of three important parts: diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst index 25969d47260999..2db34401ec919d 100644 --- a/docs/notebooks/237-segment-anything-with-output.rst +++ b/docs/notebooks/237-segment-anything-with-output.rst @@ -1962,7 +1962,7 @@ Compare Performance of the Original and Quantized Models `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/238-deep-floyd-if-with-output.rst b/docs/notebooks/238-deep-floyd-if-with-output.rst index 5701933a9efb3a..4f46b3c9ead026 100644 --- a/docs/notebooks/238-deep-floyd-if-with-output.rst +++ b/docs/notebooks/238-deep-floyd-if-with-output.rst @@ -313,7 +313,7 @@ shape, type, and value within a single argument, providing greater flexibility. To learn more, refer to this -`page `__ +`page `__ .. 
code:: ipython3 diff --git a/docs/notebooks/239-image-bind-convert-with-output.rst b/docs/notebooks/239-image-bind-convert-with-output.rst index ffd69a13191468..a9354866a28de0 100644 --- a/docs/notebooks/239-image-bind-convert-with-output.rst +++ b/docs/notebooks/239-image-bind-convert-with-output.rst @@ -237,7 +237,7 @@ While ONNX models are directly supported by OpenVINO™ runtime, it can be useful to convert them to IR format to take advantage of advanced OpenVINO optimization tools and features. You will use `model conversion Python -API `__ +API `__ to convert model to IR format and compress weights to ``FP16`` format. The ``mo.convert_model`` function returns OpenVINO Model class instance ready to load on a device or save on a disk for next loading. diff --git a/docs/notebooks/242-freevc-voice-conversion-with-output.rst b/docs/notebooks/242-freevc-voice-conversion-with-output.rst index 1c39257b4a76f0..0a372bf31c85b7 100644 --- a/docs/notebooks/242-freevc-voice-conversion-with-output.rst +++ b/docs/notebooks/242-freevc-voice-conversion-with-output.rst @@ -288,7 +288,7 @@ model. The obtained model is ready to use and to be loaded on a device using ``compile_model`` or can be saved on a disk using the ``serialize`` function. The ``read_model`` method loads a saved model from a disk. For more information about model conversion, see this -`page `__. +`page `__. Convert Prior Encoder. `⇑ <#top>`__ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst index c709cd516e92e6..f989c47a6e0bd8 100644 --- a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst +++ b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst @@ -135,9 +135,9 @@ next running. Optionally, we can apply compression to the FP16 model weights, using the ``compress_to_fp16=True`` option and integrate preprocessing, using this approach. For more information about model conversion, see this -`page `__. +`page `__. For TensorFlow Lite, refer to the `models -support `__. +support `__. .. code:: ipython3 diff --git a/docs/notebooks/245-typo-detector-with-output.rst b/docs/notebooks/245-typo-detector-with-output.rst index b5e609416e8dcb..a9248929307134 100644 --- a/docs/notebooks/245-typo-detector-with-output.rst +++ b/docs/notebooks/245-typo-detector-with-output.rst @@ -77,9 +77,9 @@ hardware. First the Pytorch model is converted to the ONNX format and then the `Model -Optimizer `__ +Optimizer `__ tool will be used to convert to `OpenVINO IR -format `__. This +format `__. This method provides much more insight to how to set up a pipeline from model loading to model converting, compiling and running inference with OpenVINO, so that you could conveniently use OpenVINO to optimize and @@ -379,14 +379,14 @@ Model Optimizer ''''''''''''''' `Model -Optimizer `__ +Optimizer `__ is a cross-platform command-line tool that facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices. Model Optimizer converts the model to the OpenVINO Intermediate Representation format (IR), which you can infer later with `OpenVINO -runtime `__. +runtime `__. .. code:: ipython3 @@ -399,7 +399,7 @@ Inference OpenVINO™ Runtime Python API is used to compile the model in OpenVINO IR format. 
The -`Core `__ +`Core `__ class from the ``openvino.runtime`` module is imported first. This class provides access to the OpenVINO Runtime API. The ``core`` object, which is an instance of the ``Core`` class, represents the API and it is used diff --git a/docs/notebooks/246-depth-estimation-videpth-with-output.rst b/docs/notebooks/246-depth-estimation-videpth-with-output.rst index eb91c99950ab26..98c52ee17fe864 100644 --- a/docs/notebooks/246-depth-estimation-videpth-with-output.rst +++ b/docs/notebooks/246-depth-estimation-videpth-with-output.rst @@ -64,7 +64,7 @@ repository `__ for the pre-processing, model transformations and basic utility code. A part of it has already been kept as it is in the `utils `__ directory. At the same time we will learn how to perform `model -conversion `__ +conversion `__ for converting a model in a different format to the standard OpenVINO™ IR model representation *via* another format. @@ -312,7 +312,7 @@ Dummy input creation ^^^^^^^^^^^^^^^^^^^^ Dummy inputs are necessary for `PyTorch to -ONNX `__ +ONNX `__ conversion. Although `torch.onnx.export `__ accepts any dummy input for a single pass through the model and thereby diff --git a/docs/notebooks/247-code-language-id-with-output.rst b/docs/notebooks/247-code-language-id-with-output.rst index 778ddf7e6e1ca4..2d0c9d3019b418 100644 --- a/docs/notebooks/247-code-language-id-with-output.rst +++ b/docs/notebooks/247-code-language-id-with-output.rst @@ -7,7 +7,7 @@ Overview This tutorial will be divided in 2 parts: 1. Create a simple inference pipeline with a pre-trained model using the OpenVINO™ IR format. -2. Conduct `post-training quantization `__ +2. Conduct `post-training quantization `__ on a pre-trained model using Hugging Face Optimum and benchmark performance. Feel free to use the notebook outline in Jupyter or your IDE for easy @@ -257,7 +257,7 @@ Part 2: OpenVINO post-training quantization with HuggingFace Optimum In this section, we will quantize a trained model. At a high-level, this process consists of using lower precision numbers in the model, which results in a smaller model size and faster inference at the cost of a -potential marginal performance degradation. `Learn more `__. +potential marginal performance degradation. `Learn more `__. The HuggingFace Optimum library supports post-training quantization for OpenVINO. `Learn more `__. diff --git a/docs/notebooks/250-music-generation-with-output.rst b/docs/notebooks/250-music-generation-with-output.rst index 733e303c35fa9e..564fe33f99f9e1 100644 --- a/docs/notebooks/250-music-generation-with-output.rst +++ b/docs/notebooks/250-music-generation-with-output.rst @@ -206,7 +206,7 @@ a time and this vector will just consist of ones. We use OpenVINO Converter (OVC) below to convert the PyTorch model to the OpenVINO Intermediate Representation format (IR), which you can infer later with `OpenVINO -runtime `__ +runtime `__ .. code:: ipython3 @@ -364,7 +364,7 @@ Embedding the converted models into the original pipeline `⇑ <#top>`__ OpenVINO™ Runtime Python API is used to compile the model in OpenVINO IR format. The -`Core `__ +`Core `__ class provides access to the OpenVINO Runtime API. The ``core`` object, which is an instance of the ``Core`` class represents the API and it is used to compile the model. 
diff --git a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst index 6054fb8ae8c823..bce43dba73904d 100644 --- a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst +++ b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst @@ -399,11 +399,11 @@ Download Intermediate Representation (IR) model. ir_model = ie.read_model(model_xml) Use `Basic Quantization -Flow `__. +Flow `__. To use the most advanced quantization flow that allows to apply 8-bit quantization to the model with accuracy control see `Quantizing with accuracy -control `__. +control `__. .. code:: ipython3 @@ -584,7 +584,7 @@ Compare Inference Speed ----------------------- Measure inference speed with the `OpenVINO Benchmark -App `__. +App `__. Benchmark App is a command line tool that measures raw inference performance for a specified OpenVINO IR model. Run @@ -594,7 +594,7 @@ the ``-m`` parameter with asynchronous inference on CPU, for one minute. Use the ``-d`` parameter to test performance on a different device, for example an Intel integrated Graphics (iGPU), and ``-t`` to set the number of seconds to run inference. See the -`documentation `__ +`documentation `__ for more information. This tutorial uses a wrapper function from `Notebook @@ -875,7 +875,7 @@ cached to the ``model_cache`` directory. With a recent Intel CPU, the best performance can often be achieved by doing inference on both the CPU and the iGPU, with OpenVINO’s `Multi Device -Plugin `__. +Plugin `__. It takes a bit longer to load a model on GPU than on CPU, so this benchmark will take a bit longer to complete than the CPU benchmark. diff --git a/docs/notebooks/301-tensorflow-training-openvino-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-with-output.rst index 0b02ba0ee4f272..6bb74ee2b89df4 100644 --- a/docs/notebooks/301-tensorflow-training-openvino-with-output.rst +++ b/docs/notebooks/301-tensorflow-training-openvino-with-output.rst @@ -924,7 +924,7 @@ Convert the TensorFlow model with OpenVINO Model Optimizer `⇑ <#top>`__ To convert the model to OpenVINO IR with ``FP16`` precision, use model conversion Python API. For more information, see this -`page `__. +`page `__. .. code:: ipython3 diff --git a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst index 3cc99a837ea6bb..0448bcbd2c1793 100644 --- a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst +++ b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst @@ -703,7 +703,7 @@ scale the input with the standard deviation by the ``mean_values`` and before propagating it through the network with these options. For more information about model conversion, see this -`page `__. +`page `__. .. code:: ipython3 @@ -733,7 +733,7 @@ Benchmark Model Performance by Computing Inference Time `⇑ <#top>`__ Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. 
It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst index 8f0ad9a7f72357..7d6e7934675d69 100644 --- a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst +++ b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst @@ -431,7 +431,7 @@ Export Models to OpenVINO Intermediate Representation (IR) `⇑ <#top>`__ Use model conversion Python API to convert the models to OpenVINO IR. For more information about model conversion, see this -`page `__. +`page `__. Executing this command may take a while. @@ -473,7 +473,7 @@ Benchmark Model Performance by Computing Inference Time `⇑ <#top>`__ Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/401-object-detection-with-output.rst b/docs/notebooks/401-object-detection-with-output.rst index 45ee50e220e9df..da6f2e47f99c40 100644 --- a/docs/notebooks/401-object-detection-with-output.rst +++ b/docs/notebooks/401-object-detection-with-output.rst @@ -156,7 +156,7 @@ Convert the Model `⇑ <#top>`__ The pre-trained model is in TensorFlow format. To use it with OpenVINO, convert it to OpenVINO IR format, using `model conversion Python -API `__ +API `__ (``mo.convert_model`` function). If the model has been already converted, this step is skipped. diff --git a/docs/notebooks/406-3D-pose-estimation-with-output.rst b/docs/notebooks/406-3D-pose-estimation-with-output.rst index 121a5d44326cef..4cef53e5c7fb38 100644 --- a/docs/notebooks/406-3D-pose-estimation-with-output.rst +++ b/docs/notebooks/406-3D-pose-estimation-with-output.rst @@ -212,7 +212,7 @@ format. Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-475/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=/tmp/tmpgwxi10io --model_name=human-pose-estimation-3d-0001 --input=data '--mean_values=data[128.0,128.0,128.0]' '--scale_values=data[255.0,255.0,255.0]' --output=features,heatmaps,pafs --input_model=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 256, 448]' --compress_to_fp16=False [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. 
[ SUCCESS ] XML file: /tmp/tmpgwxi10io/human-pose-estimation-3d-0001.xml [ SUCCESS ] BIN file: /tmp/tmpgwxi10io/human-pose-estimation-3d-0001.bin diff --git a/docs/notebooks/407-person-tracking-with-output.rst b/docs/notebooks/407-person-tracking-with-output.rst index b267e6bd9ec6dc..9e11051c3f436c 100644 --- a/docs/notebooks/407-person-tracking-with-output.rst +++ b/docs/notebooks/407-person-tracking-with-output.rst @@ -184,15 +184,15 @@ Representation (OpenVINO IR). Using a model outside the list can require different pre- and post-processing. -In this case, `person detection model `__ +In this case, `person detection model `__ is deployed to detect the person in each frame of the video, and -`reidentification model `__ +`reidentification model `__ is used to output embedding vector to match a pair of images of a person by the cosine distance. If you want to download another model (``person-detection-xxx`` from -`Object Detection Models list `__, -``person-reidentification-retail-xxx`` from `Reidentification Models list `__), +`Object Detection Models list `__, +``person-reidentification-retail-xxx`` from `Reidentification Models list `__), replace the name of the model in the code below. .. code:: ipython3 diff --git a/docs/optimization_guide/dldt_deployment_optimization_common.md b/docs/optimization_guide/dldt_deployment_optimization_common.md index b4ad86fb593cd9..347c8417f1c9e9 100644 --- a/docs/optimization_guide/dldt_deployment_optimization_common.md +++ b/docs/optimization_guide/dldt_deployment_optimization_common.md @@ -60,7 +60,7 @@ Below are example-codes for the regular and async-based approaches to compare: The technique can be generalized to any available parallel slack. For example, you can do inference and simultaneously encode the resulting or previous frames or run further inference, like emotion detection on top of the face detection results. -Refer to the `Object Detection C++ Demo `__ , `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action. +Refer to the `Object Detection C++ Demo `__ , `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action. .. note:: diff --git a/docs/resources/prerelease_information.md b/docs/resources/prerelease_information.md index c8a90cc5815062..42f85cb6e6df62 100644 --- a/docs/resources/prerelease_information.md +++ b/docs/resources/prerelease_information.md @@ -79,7 +79,7 @@ Please file a github Issue on these with the label “pre-release” so we can g * PyTorch FE: * Added support for 6 new operations. 
To know how to enjoy PyTorch models conversion follow - this `Link `__ + this `Link `__ * aten::concat * aten::masked_scatter diff --git a/samples/c/hello_nv12_input_classification/README.md b/samples/c/hello_nv12_input_classification/README.md index 9bd811e7143971..0056c897448ad1 100644 --- a/samples/c/hello_nv12_input_classification/README.md +++ b/samples/c/hello_nv12_input_classification/README.md @@ -140,7 +140,7 @@ See Also - :doc:`Using OpenVINO™ Samples ` - :doc:`Model Downloader ` - :doc:`Convert a Model ` -- `C API Reference `__ +- `C API Reference `__ @endsphinxdirective diff --git a/samples/python/classification_sample_async/README.md b/samples/python/classification_sample_async/README.md index 57d26c9e6823ae..274b05e04e6fca 100644 --- a/samples/python/classification_sample_async/README.md +++ b/samples/python/classification_sample_async/README.md @@ -34,11 +34,11 @@ Models with only 1 input and output are supported. +--------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------+ | Feature | API | Description | +====================+===========================================================================================================================================================================================================+===========================+ - | Asynchronous Infer | `openvino.runtime.AsyncInferQueue `__ , | Do asynchronous inference | - | | `openvino.runtime.AsyncInferQueue.set_callback `__ , | | - | | `openvino.runtime.AsyncInferQueue.start_async `__ , | | - | | `openvino.runtime.AsyncInferQueue.wait_all `__ , | | - | | `openvino.runtime.InferRequest.results `__ | | + | Asynchronous Infer | `openvino.runtime.AsyncInferQueue `__ , | Do asynchronous inference | + | | `openvino.runtime.AsyncInferQueue.set_callback `__ , | | + | | `openvino.runtime.AsyncInferQueue.start_async `__ , | | + | | `openvino.runtime.AsyncInferQueue.wait_all `__ , | | + | | `openvino.runtime.InferRequest.results `__ | | +--------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python Sample `. diff --git a/samples/python/hello_classification/README.md b/samples/python/hello_classification/README.md index 1001948656d62a..8e97151fb54938 100644 --- a/samples/python/hello_classification/README.md +++ b/samples/python/hello_classification/README.md @@ -34,23 +34,23 @@ Models with only 1 input and output are supported. 
+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Feature | API | Description | +=============================+===========================================================================================================================================================================================================================================+============================================================================================================================================================================================+ - | Basic Infer Flow | `openvino.runtime.Core `__ , | | - | | `openvino.runtime.Core.read_model `__ , | | - | | `openvino.runtime.Core.compile_model `__ | Common API to do inference | + | Basic Infer Flow | `openvino.runtime.Core `__ , | | + | | `openvino.runtime.Core.read_model `__ , | | + | | `openvino.runtime.Core.compile_model `__ | Common API to do inference | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Synchronous Infer | `openvino.runtime.CompiledModel.infer_new_request `__ | Do synchronous inference | + | Synchronous Infer | `openvino.runtime.CompiledModel.infer_new_request `__ | Do synchronous inference | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Operations | `openvino.runtime.Model.inputs `__ , | Managing of model | - | | `openvino.runtime.Model.outputs `__ | | + | Model Operations | `openvino.runtime.Model.inputs `__ , | Managing of model | + | | `openvino.runtime.Model.outputs `__ | | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Preprocessing | `openvino.preprocess.PrePostProcessor `__ , | Set image of the original size as input for a model with other input size. 
Resize and layout conversions will be performed automatically by the corresponding plugin just before inference | - | | `openvino.preprocess.InputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_layout `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_spatial_static_shape `__ , | | - | | `openvino.preprocess.PreProcessSteps.resize `__ , | | - | | `openvino.preprocess.InputModelInfo.set_layout `__ , | | - | | `openvino.preprocess.OutputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.PrePostProcessor.build `__ | | + | Preprocessing | `openvino.preprocess.PrePostProcessor `__ , | Set image of the original size as input for a model with other input size. Resize and layout conversions will be performed automatically by the corresponding plugin just before inference | + | | `openvino.preprocess.InputTensorInfo.set_element_type `__ , | | + | | `openvino.preprocess.InputTensorInfo.set_layout `__ , | | + | | `openvino.preprocess.InputTensorInfo.set_spatial_static_shape `__ , | | + | | `openvino.preprocess.PreProcessSteps.resize `__ , | | + | | `openvino.preprocess.InputModelInfo.set_layout `__ , | | + | | `openvino.preprocess.OutputTensorInfo.set_element_type `__ , | | + | | `openvino.preprocess.PrePostProcessor.build `__ | | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. tab-item:: Sample Code diff --git a/samples/python/hello_query_device/README.md b/samples/python/hello_query_device/README.md index 0745bcd646aba2..a43ec4bf01ff58 100644 --- a/samples/python/hello_query_device/README.md +++ b/samples/python/hello_query_device/README.md @@ -29,11 +29,11 @@ This sample demonstrates how to show OpenVINO™ Runtime devices and prints thei +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ | Feature | API | Description | +=======================================+============================================================================================================================================================================================+========================================+ - | Basic | `openvino.runtime.Core `__ | Common API | + | Basic | `openvino.runtime.Core `__ | Common API | +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ - | Query Device | `openvino.runtime.Core.available_devices `__ , | Get device properties | - | | `openvino.runtime.Core.get_metric `__ , | | - | | `openvino.runtime.Core.get_config `__ | | + | Query Device | `openvino.runtime.Core.available_devices `__ , | Get device properties | + | | `openvino.runtime.Core.get_metric `__ , | | + | | `openvino.runtime.Core.get_config `__ | | 
+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ .. tab-item:: Sample Code diff --git a/samples/python/hello_reshape_ssd/README.md b/samples/python/hello_reshape_ssd/README.md index 0b5587f51af457..90deed2c269ffd 100644 --- a/samples/python/hello_reshape_ssd/README.md +++ b/samples/python/hello_reshape_ssd/README.md @@ -37,10 +37,10 @@ Models with only 1 input and output are supported. +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ | Feature | API | Description | +====================================+================================================================================================================================================================================+======================================+ - | Model Operations | `openvino.runtime.Model.reshape `__ , | Managing of model | - | | `openvino.runtime.Model.input `__ , | | - | | `openvino.runtime.Output.get_any_name `__ , | | - | | `openvino.runtime.PartialShape `__ | | + | Model Operations | `openvino.runtime.Model.reshape `__ , | Managing of model | + | | `openvino.runtime.Model.input `__ , | | + | | `openvino.runtime.Output.get_any_name `__ , | | + | | `openvino.runtime.PartialShape `__ | | +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. 
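As a brief, hedged illustration of the reshape-related calls listed in the table above (the file name and target resolution are placeholders, not values used by the sample):

.. code-block:: python

    from openvino.runtime import Core, PartialShape

    core = Core()
    model = core.read_model("ssd.xml")                # hypothetical IR file

    first_input = model.input(0)
    print("Reshaping", first_input.get_any_name(), "from", first_input.get_partial_shape())

    # Give the model a new static input resolution before compiling it
    model.reshape({first_input.get_any_name(): PartialShape([1, 3, 544, 544])})
    compiled_model = core.compile_model(model, "CPU")
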
diff --git a/samples/python/model_creation_sample/README.md b/samples/python/model_creation_sample/README.md index 72de533524747a..bd2aa44b12e0f8 100644 --- a/samples/python/model_creation_sample/README.md +++ b/samples/python/model_creation_sample/README.md @@ -33,19 +33,19 @@ This sample demonstrates how to run inference using a :doc:`model `__ , | Managing of model | - | | `openvino.runtime.set_batch `__ , | | - | | `openvino.runtime.Model.input `__ | | + | Model Operations | `openvino.runtime.Model `__ , | Managing of model | + | | `openvino.runtime.set_batch `__ , | | + | | `openvino.runtime.Model.input `__ | | +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ - | Opset operations | `openvino.runtime.op.Parameter `__ , | Description of a model topology using OpenVINO Python API | - | | `openvino.runtime.op.Constant `__ , | | - | | `openvino.runtime.opset8.convolution `__ , | | - | | `openvino.runtime.opset8.add `__ , | | - | | `openvino.runtime.opset1.max_pool `__ , | | - | | `openvino.runtime.opset8.reshape `__ , | | - | | `openvino.runtime.opset8.matmul `__ , | | - | | `openvino.runtime.opset8.relu `__ , | | - | | `openvino.runtime.opset8.softmax `__ | | + | Opset operations | `openvino.runtime.op.Parameter `__ , | Description of a model topology using OpenVINO Python API | + | | `openvino.runtime.op.Constant `__ , | | + | | `openvino.runtime.opset8.convolution `__ , | | + | | `openvino.runtime.opset8.add `__ , | | + | | `openvino.runtime.opset1.max_pool `__ , | | + | | `openvino.runtime.opset8.reshape `__ , | | + | | `openvino.runtime.opset8.matmul `__ , | | + | | `openvino.runtime.opset8.relu `__ , | | + | | `openvino.runtime.opset8.softmax `__ | | +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. 
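To show how the opset calls from the table combine into a model built from scratch, here is a condensed sketch; the layer shapes are arbitrary and do not correspond to the sample's own network:

.. code-block:: python

    import numpy as np
    import openvino.runtime.opset8 as ops
    from openvino.runtime import Core, Model

    # Declare a 1x3x32x32 input and a small convolution + activation + classifier head
    data = ops.parameter([1, 3, 32, 32], np.float32, name="data")
    weights = ops.constant(np.random.rand(8, 3, 3, 3).astype(np.float32))
    conv = ops.convolution(data, weights, strides=[1, 1],
                           pads_begin=[1, 1], pads_end=[1, 1], dilations=[1, 1])
    act = ops.relu(conv)
    flat = ops.reshape(act, ops.constant(np.array([1, -1], dtype=np.int64)), special_zero=False)
    probs = ops.softmax(flat, axis=1)

    # Wrap the graph into a Model and compile it like any other OpenVINO model
    model = Model([probs], [data], "toy_model")
    compiled_model = Core().compile_model(model, "CPU")
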
diff --git a/samples/python/speech_sample/README.md b/samples/python/speech_sample/README.md index 55408247fc379d..6ededaabaa7b6b 100644 --- a/samples/python/speech_sample/README.md +++ b/samples/python/speech_sample/README.md @@ -45,17 +45,17 @@ The sample works with Kaldi ARK or Numpy* uncompressed NPZ files, so it does not +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ | Feature | API | Description | +===================================================================+================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+=======================================================================+ - | Import/Export Model | `openvino.runtime.Core.import_model `__ , `openvino.runtime.CompiledModel.export_model `__ | The GNA plugin supports loading and saving of the GNA-optimized model | + | Import/Export Model | `openvino.runtime.Core.import_model `__ , `openvino.runtime.CompiledModel.export_model `__ | The GNA plugin supports loading and saving of the GNA-optimized model | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Model Operations | `openvino.runtime.Model.add_outputs `__ , `openvino.runtime.set_batch `__ , `openvino.runtime.CompiledModel.inputs `__ , `openvino.runtime.CompiledModel.outputs `__ , `openvino.runtime.ConstOutput.any_name `__ | Managing of model: configure batch_size, input and output tensors | + | Model Operations | `openvino.runtime.Model.add_outputs `__ , `openvino.runtime.set_batch `__ , `openvino.runtime.CompiledModel.inputs `__ , `openvino.runtime.CompiledModel.outputs `__ , `openvino.runtime.ConstOutput.any_name `__ | Managing of model: configure batch_size, input and output tensors | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Synchronous Infer | `openvino.runtime.CompiledModel.create_infer_request `__ , `openvino.runtime.InferRequest.infer `__ | Do synchronous inference | + | Synchronous Infer | `openvino.runtime.CompiledModel.create_infer_request `__ , `openvino.runtime.InferRequest.infer `__ | Do synchronous inference | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | InferRequest Operations | `openvino.runtime.InferRequest.get_input_tensor `__ , `openvino.runtime.InferRequest.model_outputs `__ , `openvino.runtime.InferRequest.model_inputs `__ , | Get info about model using infer request API | + | InferRequest Operations | `openvino.runtime.InferRequest.get_input_tensor `__ , `openvino.runtime.InferRequest.model_outputs `__ , `openvino.runtime.InferRequest.model_inputs `__ , | Get info about model using infer request API | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | InferRequest Operations | `openvino.runtime.InferRequest.query_state `__ , `openvino.runtime.VariableState.reset `__ | Gets and resets CompiledModel state control | + | InferRequest Operations | `openvino.runtime.InferRequest.query_state `__ , `openvino.runtime.VariableState.reset `__ | Gets and resets CompiledModel state control | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Profiling | `openvino.runtime.InferRequest.profiling_info `__ , `openvino.runtime.ProfilingInfo.real_time `__ | Get infer request profiling info | + | Profiling | `openvino.runtime.InferRequest.profiling_info `__ , `openvino.runtime.ProfilingInfo.real_time `__ | Get infer request profiling info | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. diff --git a/src/README.md b/src/README.md index 380d24fe71968f..7d3d028bd00736 100644 --- a/src/README.md +++ b/src/README.md @@ -59,7 +59,7 @@ OpenVINO provides bindings for different languages. To get the full list of supp ## Core developer topics * [OpenVINO architecture](./docs/architecture.md) - * [Plugin Development](https://docs.openvino.ai/2023.0/openvino_docs_ie_plugin_dg_overview.html) + * [Plugin Development](https://docs.openvino.ai/2023.1/openvino_docs_ie_plugin_dg_overview.html) * [Thread safety](#todo) * [Performance](#todo) diff --git a/src/bindings/c/README.md b/src/bindings/c/README.md index 225c976d49c4d2..1367c2b8b28301 100644 --- a/src/bindings/c/README.md +++ b/src/bindings/c/README.md @@ -25,7 +25,7 @@ People from the [openvino-c-api-maintainers](https://github.com/orgs/openvinotoo OpenVINO C API has the following structure: * [docs](./docs) contains developer documentation for OpenVINO C APIs. - * [include](./include) contains all provided C API headers. [Learn more](https://docs.openvino.ai/2023.0/api/api_reference.html). + * [include](./include) contains all provided C API headers. 
[Learn more](https://docs.openvino.ai/2023.1/api/api_reference.html). * [src](./src) contains the implementations of all C APIs. * [tests](./tests) contains all tests for OpenVINO C APIs. [Learn more](./docs/how_to_write_unit_test.md). @@ -33,7 +33,7 @@ OpenVINO C API has the following structure: ## Tutorials -* [How to integrate OpenVINO C API with Your Application](https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Integrate_OV_with_your_application.html) +* [How to integrate OpenVINO C API with Your Application](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Integrate_OV_with_your_application.html) * [How to wrap OpenVINO objects with C](./docs/how_to_wrap_openvino_objects_with_c.md) * [How to wrap OpenVINO interfaces with C](./docs/how_to_wrap_openvino_interfaces_with_c.md) * [Samples implemented by OpenVINO C API](../../../samples/c/) @@ -47,5 +47,5 @@ See [CONTRIBUTING](../../../CONTRIBUTING.md) for details. ## See also * [OpenVINO™ README](../../../README.md) - * [OpenVINO Runtime C API User Guide](https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Integrate_OV_with_your_application.html) - * [Migration of OpenVINO C API](https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html) + * [OpenVINO Runtime C API User Guide](https://docs.openvino.ai/2023.1/openvino_docs_OV_UG_Integrate_OV_with_your_application.html) + * [Migration of OpenVINO C API](https://docs.openvino.ai/2023.1/openvino_2_0_transition_guide.html) diff --git a/src/bindings/c/docs/how_to_wrap_openvino_interfaces_with_c.md b/src/bindings/c/docs/how_to_wrap_openvino_interfaces_with_c.md index 435e2e35529140..8508e3e87ce654 100644 --- a/src/bindings/c/docs/how_to_wrap_openvino_interfaces_with_c.md +++ b/src/bindings/c/docs/how_to_wrap_openvino_interfaces_with_c.md @@ -78,4 +78,4 @@ The tensor create needs to specify the shape info, so C shape need to be convert ## See also * [OpenVINO™ README](../../../../README.md) * [C API developer guide](../README.md) - * [C API Reference](https://docs.openvino.ai/2023.0/api/api_reference.html) + * [C API Reference](https://docs.openvino.ai/2023.1/api/api_reference.html) diff --git a/src/bindings/c/docs/how_to_wrap_openvino_objects_with_c.md b/src/bindings/c/docs/how_to_wrap_openvino_objects_with_c.md index 092f37138ac723..a6c9982ca475fe 100644 --- a/src/bindings/c/docs/how_to_wrap_openvino_objects_with_c.md +++ b/src/bindings/c/docs/how_to_wrap_openvino_objects_with_c.md @@ -73,4 +73,4 @@ https://github.com/openvinotoolkit/openvino/blob/d96c25844d6cfd5ad131539c8a09282 ## See also * [OpenVINO™ README](../../../../README.md) * [C API developer guide](../README.md) - * [C API Reference](https://docs.openvino.ai/2023.0/api/api_reference.html) \ No newline at end of file + * [C API Reference](https://docs.openvino.ai/2023.1/api/api_reference.html) \ No newline at end of file diff --git a/src/bindings/c/docs/how_to_write_unit_test.md b/src/bindings/c/docs/how_to_write_unit_test.md index 0cc2f0e1681454..2db1a7ac88ee3d 100644 --- a/src/bindings/c/docs/how_to_write_unit_test.md +++ b/src/bindings/c/docs/how_to_write_unit_test.md @@ -14,5 +14,5 @@ https://github.com/openvinotoolkit/openvino/blob/d96c25844d6cfd5ad131539c8a09282 ## See also * [OpenVINO™ README](../../../../README.md) * [C API developer guide](../README.md) - * [C API Reference](https://docs.openvino.ai/2023.0/api/api_reference.html) + * [C API Reference](https://docs.openvino.ai/2023.1/api/api_reference.html) diff --git a/src/plugins/auto/docs/integration.md b/src/plugins/auto/docs/integration.md index 
9b719b01f9ead5..8a567a4614ece6 100644 --- a/src/plugins/auto/docs/integration.md +++ b/src/plugins/auto/docs/integration.md @@ -1,7 +1,7 @@ # AUTO Plugin Integration ## Implement a New Plugin -Refer to [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/latest/openvino_docs_ie_plugin_dg_overview.html) for detailed information on how to implement a new plugin. +Refer to [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.1/openvino_docs_ie_plugin_dg_overview.html) for detailed information on how to implement a new plugin. Query model method `ov::IPlugin::query_model()` is recommended as it is important for AUTO to quickly make decisions and save selection time. diff --git a/src/plugins/proxy/README.md b/src/plugins/proxy/README.md index 5d5ae6b136c500..fbd4caf627dd24 100644 --- a/src/plugins/proxy/README.md +++ b/src/plugins/proxy/README.md @@ -47,5 +47,5 @@ After the creation the proxy plugin has next properties: * [OpenVINO Core Components](../../README.md) * [OpenVINO Plugins](../README.md) * [Developer documentation](../../../docs/dev/index.md) - * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/latest/openvino_docs_ie_plugin_dg_overview.html) + * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.1/openvino_docs_ie_plugin_dg_overview.html) diff --git a/tools/pot/README.md b/tools/pot/README.md index 15230642719a06..766d6e10439c55 100644 --- a/tools/pot/README.md +++ b/tools/pot/README.md @@ -12,14 +12,14 @@ and run on CPU with the OpenVINO™. Figure below shows the optimization workflow: ![](docs/images/workflow_simple.svg) -To get started with POT tool refer to the corresponding OpenVINO™ [documentation](https://docs.openvino.ai/2023.0/openvino_docs_model_optimization_guide.html). +To get started with POT tool refer to the corresponding OpenVINO™ [documentation](https://docs.openvino.ai/2023.1/openvino_docs_model_optimization_guide.html). ## Installation ### From PyPI -POT is distributed as a part of OpenVINO™ Development Tools package. For installation instruction please refer to this [document](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_install_dev_tools.html). +POT is distributed as a part of OpenVINO™ Development Tools package. For installation instruction please refer to this [document](https://docs.openvino.ai/2023.1/openvino_docs_install_guides_install_dev_tools.html). ### From GitHub -As prerequisites, you should install [OpenVINO™ Runtime](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_install_runtime.html) and other dependencies such as [Model Optimizer](https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) and [Accuracy Checker](https://docs.openvino.ai/2023.0/omz_tools_accuracy_checker.html). +As prerequisites, you should install [OpenVINO™ Runtime](https://docs.openvino.ai/2023.1/openvino_docs_install_guides_overview.html) and other dependencies such as [Model Optimizer](https://docs.openvino.ai/2023.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) and [Accuracy Checker](https://docs.openvino.ai/2023.1/omz_tools_accuracy_checker.html). 
To install POT from source: - Clone OpenVINO repository @@ -40,7 +40,7 @@ After installation POT is available as a Python library under `openvino.tools.po OpenVINO provides several examples to demonstrate the POT optimization workflow: * Command-line example: - * [Quantization of Image Classification model](https://docs.openvino.ai/2023.0/pot_configs_examples_README.html) + * [Quantization of Image Classification model](https://docs.openvino.ai/2023.1/pot_configs_examples_README.html) * API tutorials: * [Quantization of Image Classification model](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/301-tensorflow-training-openvino) * [Quantization of Object Detection model from Model Zoo](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/111-yolov5-quantization-migration) @@ -55,4 +55,4 @@ OpenVINO provides several examples to demonstrate the POT optimization workflow: ## See Also -* [Performance Benchmarks](https://docs.openvino.ai/2023.0/openvino_docs_performance_benchmarks.html) +* [Performance Benchmarks](https://docs.openvino.ai/2023.1/openvino_docs_performance_benchmarks.html) diff --git a/tools/pot/docs/ModelRepresentation.md b/tools/pot/docs/ModelRepresentation.md index 8bb9f3d3fbc8aa..c6f693cf30e1d6 100644 --- a/tools/pot/docs/ModelRepresentation.md +++ b/tools/pot/docs/ModelRepresentation.md @@ -8,7 +8,7 @@ Currently, there are two groups of optimization methods that can change the IR a ## Representation of quantized models -The OpenVINO Toolkit represents all the quantized models using the so-called [FakeQuantize](https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_prepare_model_convert_model_Legacy_IR_Layers_Catalog_Spec.html#fakequantize-layer) operation. This operation is very expressive and allows mapping values from arbitrary input and output ranges. We project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then re-project discrete values back to the original range and data type. It can be considered as an emulation of the quantization/dequantization process which happens at runtime. The figure below shows a part of the DL model, namely the Convolutional layer, that undergoes various transformations, from being a floating-point model to an integer model executed in the OpenVINO runtime. Column 2 of this figure below shows a model quantized with [Neural Network Compression Framework (NNCF)](https://github.com/openvinotoolkit/nncf). +The OpenVINO Toolkit represents all the quantized models using the so-called [FakeQuantize](https://docs.openvino.ai/2021.4/openvino_docs_MO_DG_prepare_model_convert_model_Legacy_IR_Layers_Catalog_Spec.html#fakequantize-layer) operation. This operation is very expressive and allows mapping values from arbitrary input and output ranges. We project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then re-project discrete values back to the original range and data type. It can be considered as an emulation of the quantization/dequantization process which happens at runtime. The figure below shows a part of the DL model, namely the Convolutional layer, that undergoes various transformations, from being a floating-point model to an integer model executed in the OpenVINO runtime. Column 2 of this figure below shows a model quantized with [Neural Network Compression Framework (NNCF)](https://github.com/openvinotoolkit/nncf). 
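A small NumPy sketch can make the affine quantize/dequantize emulation described above concrete (the ranges and the 256 levels below are illustrative only, not values taken from a real model):

```python
import numpy as np

def fake_quantize(x, in_low, in_high, out_low, out_high, levels=256):
    """Emulate FakeQuantize: clamp, discretize to `levels` steps, then re-project."""
    x = np.clip(x, in_low, in_high)
    q = np.round((x - in_low) / (in_high - in_low) * (levels - 1))
    return q / (levels - 1) * (out_high - out_low) + out_low

data = np.linspace(-1.5, 1.5, 7).astype(np.float32)
print(fake_quantize(data, in_low=-1.0, in_high=1.0, out_low=-1.0, out_high=1.0))
```
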
![](images/model_flow.png) To reduce memory footprint weights of quantized models are transformed to a target data type, e.g. in the case of 8-bit quantization, this is int8. During this transformation, the floating-point weights tensor and one of the FakeQuantize operations that correspond to it are replaced with 8-bit weight tensor and the sequence of Convert, Subtract, Multiply operations that represent the typecast and dequantization parameters (scale and zero-point) as it is shown in column 3 of the figure. From 15685e01411eca7994762931736fe28973d7071c Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 4 Sep 2023 18:09:26 +0400 Subject: [PATCH 08/16] Remove legacy API from snippets tests (#19577) * Remove legacy API from snippets tests * Fixed comment --- src/common/snippets/tests/src/lowering_utils.cpp | 14 +++++++------- .../snippets/tests/src/pass/movebroadcast.cpp | 3 --- .../tests/src/pass/precision_propagation.cpp | 1 - .../src/pass/softmax_reshape_elimination.cpp | 3 --- src/common/snippets/tests/src/precomp.hpp | 16 +++++++--------- 5 files changed, 14 insertions(+), 23 deletions(-) diff --git a/src/common/snippets/tests/src/lowering_utils.cpp b/src/common/snippets/tests/src/lowering_utils.cpp index e0f05dd22cee29..c24948f8074da5 100644 --- a/src/common/snippets/tests/src/lowering_utils.cpp +++ b/src/common/snippets/tests/src/lowering_utils.cpp @@ -86,15 +86,15 @@ std::shared_ptr LoweringTests::getSubgraph(const std for (const auto& op : f->get_ops()) { bool is_subgraph = is_type(op); if (is_subgraph) { - NGRAPH_CHECK(subgraph.use_count() == 0, - "Functions provided for lowering tests contains more than one subgraph."); + OPENVINO_ASSERT(subgraph.use_count() == 0, + "Functions provided for lowering tests contains more than one subgraph."); subgraph = as_type_ptr(op); } - NGRAPH_CHECK(is_subgraph || - is_type(op) || - is_type(op) || - is_type(op), - "Functions provided for lowering tests is not fully tokenizable"); + OPENVINO_ASSERT(is_subgraph || + is_type(op) || + is_type(op) || + is_type(op), + "Models provided for lowering tests is not fully tokenizable"); } return subgraph; } diff --git a/src/common/snippets/tests/src/pass/movebroadcast.cpp b/src/common/snippets/tests/src/pass/movebroadcast.cpp index c6dc87120740da..3779cfaae9f532 100644 --- a/src/common/snippets/tests/src/pass/movebroadcast.cpp +++ b/src/common/snippets/tests/src/pass/movebroadcast.cpp @@ -4,9 +4,6 @@ #include -#include "ngraph/function.hpp" -#include "ngraph/pass/manager.hpp" - #include "snippets/snippets_isa.hpp" #include "snippets/pass/insert_movebroadcast.hpp" diff --git a/src/common/snippets/tests/src/pass/precision_propagation.cpp b/src/common/snippets/tests/src/pass/precision_propagation.cpp index d331eb27ec8b3e..4a93f804ec84cd 100644 --- a/src/common/snippets/tests/src/pass/precision_propagation.cpp +++ b/src/common/snippets/tests/src/pass/precision_propagation.cpp @@ -5,7 +5,6 @@ #include "pass/precision_propagation.hpp" #include -#include "ngraph/pass/validate.hpp" #include "snippets/pass/propagate_precision.hpp" #include "snippets/op/convert_saturation.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/common/snippets/tests/src/pass/softmax_reshape_elimination.cpp b/src/common/snippets/tests/src/pass/softmax_reshape_elimination.cpp index 0f191adb5ac3b5..60122311a4d47f 100644 --- a/src/common/snippets/tests/src/pass/softmax_reshape_elimination.cpp +++ b/src/common/snippets/tests/src/pass/softmax_reshape_elimination.cpp @@ -4,9 +4,6 @@ #include -#include -#include - #include 
#include diff --git a/src/common/snippets/tests/src/precomp.hpp b/src/common/snippets/tests/src/precomp.hpp index b783522d287af5..4b3d5faf3aa028 100644 --- a/src/common/snippets/tests/src/precomp.hpp +++ b/src/common/snippets/tests/src/precomp.hpp @@ -4,12 +4,11 @@ #pragma once -#include -#include "openvino/core/node.hpp" -#include -#include - #include +#include +#include +#include +#include #include #include #include @@ -24,7 +23,6 @@ #include #include -#include -#include -#include -#include +#include "common_test_utils/ov_test_utils.hpp" +#include "openvino/core/node.hpp" +#include "snippets_helpers.hpp" From fef4d4d6419e83a7d2d30ded4e8431f4cd0defa8 Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Mon, 4 Sep 2023 19:11:09 +0400 Subject: [PATCH 09/16] Auto batch lost label fix (#19535) * Restored opset1::Reshape label peropagation for -1 special value * Lets opset1::Reshape keep same shape infer. Makes FindBatch transformation keep labels in output shapes of Result node * uses Parameter from correct namespace --- .../dimension_tracking.hpp | 4 +- .../dimension_tracking.cpp | 83 ++++++++++++------- .../dimension_tracking.cpp | 34 ++++++++ src/core/tests/type_prop/reshape.cpp | 12 +++ src/plugins/auto_batch/src/plugin.cpp | 4 +- 5 files changed, 102 insertions(+), 35 deletions(-) diff --git a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp index e890553085c79b..a4c655a9591ed8 100644 --- a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp @@ -45,8 +45,10 @@ void mark_layout_independent_batch(const std::shared_ptr& P2Btype& map); void mark_with_unique_dimension_labels(const std::shared_ptr& m, const ov::DimensionTracker& dt); void restore_original_dimensions( + const std::shared_ptr& model, const std::map, ov::PartialShape>& parameter_to_shape, - bool leave_batch_dynamic = true); + bool leave_batch_dynamic = true, + bool clear_labels = false); bool check_batch_tracks_through_all_the_nodes(const std::shared_ptr& m); P2Btype find_batch(const std::shared_ptr& m); bool detach_detection_output(const std::shared_ptr& f); diff --git a/src/common/transformations/src/transformations/common_optimizations/dimension_tracking.cpp b/src/common/transformations/src/transformations/common_optimizations/dimension_tracking.cpp index d075cfca7291ce..abf433436133e5 100644 --- a/src/common/transformations/src/transformations/common_optimizations/dimension_tracking.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/dimension_tracking.cpp @@ -21,15 +21,16 @@ #include "openvino/op/result.hpp" #include "openvino/op/shape_of.hpp" -void ov::batch_util::mark_with_unique_dimension_labels(const std::shared_ptr& f, +void ov::batch_util::mark_with_unique_dimension_labels(const std::shared_ptr& m, const ov::DimensionTracker& dt) { ov::label_t i = 1; - for (auto& parameter : f->get_parameters()) { + for (auto& parameter : m->get_parameters()) { ov::PartialShape new_shape = ov::PartialShape::dynamic(parameter->get_partial_shape().rank()); for (auto& dim : new_shape) dt.set_up_for_tracking(dim, i++); parameter->set_partial_shape(new_shape); } + m->validate_nodes_and_infer_types(); } void ov::batch_util::mark_batch(const std::shared_ptr& parameter, @@ -161,15 +162,17 @@ P2Btype ov::batch_util::find_batch(const 
std::shared_ptr& f) { for (auto& result : layout_independent_results) // there are no layout obvious operations on the Parameter-Result path - // considering the outer-most matching dimension is batch + // considering the outermost matching dimension is batch mark_layout_independent_batch(parameter, result->shared_from_this(), parameter_to_batch_labels); } return parameter_to_batch_labels; } void ov::batch_util::restore_original_dimensions( + const std::shared_ptr& model, const std::map, ov::PartialShape>& parameter_to_shape, - bool leave_batch_dynamic) { + bool leave_batch_dynamic, + bool clear_labels) { for (const auto& item : parameter_to_shape) { const auto& batch_marked_shape = item.first->get_partial_shape(); auto original_shape = item.second; @@ -180,11 +183,34 @@ void ov::batch_util::restore_original_dimensions( if (const auto& label = ov::DimensionTracker::get_label(batch_marked_shape[n])) { if (leave_batch_dynamic) original_shape[n] = Dimension::dynamic(); - ov::DimensionTracker::set_label(original_shape[n], label); + if (!clear_labels) + ov::DimensionTracker::set_label(original_shape[n], label); } } item.first->set_partial_shape(original_shape); } + std::unordered_map, ov::PartialShape> output_to_shape; + if (!clear_labels) { + for (const auto& result : model->get_results()) + output_to_shape[result] = result->get_output_partial_shape(0); + } + + model->validate_nodes_and_infer_types(); + + if (!clear_labels) { + for (const auto& item : output_to_shape) { + auto labeled_shape = item.second, current_shape = item.first->get_output_partial_shape(0); + auto labeled_rank = labeled_shape.rank(), current_rank = current_shape.rank(); + if (labeled_rank.is_static() && current_rank.is_static() && labeled_rank == current_rank) { + for (size_t i = 0; i < labeled_shape.size(); ++i) { + auto label = ov::DimensionTracker::get_label(labeled_shape[i]); + if (label != ov::no_label) + ov::DimensionTracker::set_label(current_shape[i], label); + } + item.first->set_output_type(0, item.first->get_element_type(), current_shape); + } + } + } } bool ov::batch_util::check_batch_tracks_through_all_the_nodes(const std::shared_ptr& f) { @@ -252,6 +278,19 @@ bool ov::batch_util::detach_detection_output(const std::shared_ptr& f return !new_outputs.empty() || !outputs_to_delete.empty(); } +std::map, ov::PartialShape> collect_original_input_shapes( + const std::shared_ptr& m) { + const auto& parameters = m->get_parameters(); + std::map, ov::PartialShape> parameter_to_shape; + for (const auto& parameter : parameters) { + auto shape = parameter->get_partial_shape(); + if (shape.rank().is_dynamic()) + return {}; + parameter_to_shape[parameter] = shape; + } + return parameter_to_shape; +} + bool ov::pass::FindBatch::run_on_model(const std::shared_ptr& m) { RUN_ON_MODEL_SCOPE(FindBatch); auto te = std::make_shared(); @@ -261,38 +300,20 @@ bool ov::pass::FindBatch::run_on_model(const std::shared_ptr& m) { if (detach_do) model_has_changed |= batch_util::detach_detection_output(m); - const auto& parameters = m->get_parameters(); - std::map, PartialShape> parameter_to_shape; - for (const auto& parameter : parameters) { - auto shape = parameter->get_partial_shape(); - if (shape.rank().is_dynamic()) - return model_has_changed; - parameter_to_shape[parameter] = shape; - } + auto parameter_to_shape = collect_original_input_shapes(m); + if (parameter_to_shape.empty()) + return model_has_changed; ov::batch_util::mark_with_unique_dimension_labels(m, dt); - m->validate_nodes_and_infer_types(); ov::batch_util::find_batch(m); if 
(!track) { - ov::batch_util::restore_original_dimensions(parameter_to_shape, false); - m->validate_nodes_and_infer_types(); - return true; + ov::batch_util::restore_original_dimensions(m, parameter_to_shape, false, false); + return false; // we have called validation on this model already } - - ov::batch_util::restore_original_dimensions(parameter_to_shape); - - m->validate_nodes_and_infer_types(); - + ov::batch_util::restore_original_dimensions(m, parameter_to_shape); bool failed_to_propagate_batch = ov::batch_util::check_batch_tracks_through_all_the_nodes(m); - - if (failed_to_propagate_batch) { // restore original input shape with labels - for (const auto& item : parameter_to_shape) - item.first->set_partial_shape(item.second); - } else { // restore original input shape with batch labels - ov::batch_util::restore_original_dimensions(parameter_to_shape, false); - } - m->validate_nodes_and_infer_types(); - return true; + ov::batch_util::restore_original_dimensions(m, parameter_to_shape, false, failed_to_propagate_batch); + return false; // we have called validation on this model already } diff --git a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp index 4db696f0e7e96f..31967c71ac76a4 100644 --- a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp +++ b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp @@ -93,6 +93,40 @@ TEST(TransformationTests, AutoBatch_FindBatch_Transpose_and_Convolution) { ASSERT_TRUE(!ov::DimensionTracker::get_label(out_shape[3])) << out_shape; } +TEST(TransformationTests, AutoBatch_LabelPropagation_Convolution_Reshape) { + auto data = std::make_shared(ov::element::f32, ov::Shape{1, 4, 6, 8}); + + const auto& filters = std::make_shared(ov::element::f32, ov::Shape{1, 4, 3, 3}); + const auto& conv = std::make_shared(data, + filters, + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); + const auto& reshape = + std::make_shared(conv, + ov::opset1::Constant::create(ov::element::i64, {3}, {-1, 4, 6}), + false); + const auto& model = std::make_shared(ov::NodeVector{reshape}, ov::ParameterVector{data}); + + ov::pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(model); + ASSERT_NO_THROW(check_rt_info(model)); + + const auto& shape = data->get_partial_shape(); + ASSERT_TRUE(ov::DimensionTracker::get_label(shape[0])) << shape; + ASSERT_TRUE(!ov::DimensionTracker::get_label(shape[1])) << shape; + ASSERT_TRUE(!ov::DimensionTracker::get_label(shape[2])) << shape; + ASSERT_TRUE(!ov::DimensionTracker::get_label(shape[3])) << shape; + + const auto& out_shape = model->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(ov::DimensionTracker::get_label(out_shape[0])) << out_shape; + ASSERT_TRUE(!ov::DimensionTracker::get_label(out_shape[1])) << out_shape; + ASSERT_TRUE(!ov::DimensionTracker::get_label(out_shape[2])) << out_shape; +} + TEST(TransformationTests, AutoBatch_FindBatch_SingleMultiply) { const auto& data = std::make_shared(ov::element::f32, ov::Shape{1, 4, 10, 10}); diff --git a/src/core/tests/type_prop/reshape.cpp b/src/core/tests/type_prop/reshape.cpp index 77f475b5e2026a..97ee3ab76edced 100644 --- a/src/core/tests/type_prop/reshape.cpp +++ b/src/core/tests/type_prop/reshape.cpp @@ -31,6 +31,18 @@ TEST(type_prop, static_value_propagation) { ASSERT_EQ(r->get_shape(), (Shape{1, 2, 3})); } +TEST(type_prop, 
reshape_static_dimension_stops_label_propagation_for_auto_batch_case) { + auto shape = ov::PartialShape({1, 1280, 1, 1}); + ov::DimensionTracker::set_label(shape[0], 1); + auto param = make_shared(element::f32, shape); + auto pattern = op::v0::Constant::create(element::i64, {2}, {-1, 1280}); + auto r = make_shared(param, pattern, false); + + ASSERT_EQ(r->get_element_type(), element::f32); + ASSERT_EQ(r->get_shape(), (Shape{1, 1280})); + ASSERT_EQ(ov::no_label, ov::DimensionTracker::get_label(r->get_output_partial_shape(0)[0])); +} + TEST(type_prop, interval_value_propagation) { auto param = make_shared(element::f32, PartialShape{Dimension(1, 8), 2, 3}); auto shape_of = make_shared(param); diff --git a/src/plugins/auto_batch/src/plugin.cpp b/src/plugins/auto_batch/src/plugin.cpp index d8bd13d428f06d..2447330ba8efcd 100644 --- a/src/plugins/auto_batch/src/plugin.cpp +++ b/src/plugins/auto_batch/src/plugin.cpp @@ -208,9 +208,7 @@ std::shared_ptr Plugin::compile_model(const std::shared_ptr< "Auto-batching operates only networks with inputs/outputs batched by 0th dimension"); } } - const auto& results = cloned_model->get_results(); - for (size_t output_id = 0; output_id < results.size(); output_id++) { - const auto& output = results[output_id]; + for (const auto& output : cloned_model->get_results()) { const auto& shape = output->get_output_partial_shape(0); if (shape.is_dynamic()) OPENVINO_THROW("Auto-batching does not support dynamic networks!"); From 509e00da60f02208ffed92ab46a3e13b6ee6810e Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 5 Sep 2023 00:42:11 +0400 Subject: [PATCH 10/16] Move Inference Functional Caching Tests to new API (#19513) * Moved inference unit tests to new API * Added infer request and variable state * Try to fix LTO * Try to avoid warning from gmock * Try to fix azure build * Try to fix Windows build * Comment all variable_state_test file for future investigation * Start migration of caching tests to new API * Removed legacy API from mock_plugin * Fixed more tests * Remove redundant code * Enable more tests * Move all tests, need to reenable tests * Revert incorrect change * Cosmetic changes * Fixed AUTO BATCH tests and disabled hetero tests * Fixed crash in HETERO tests --- .../tests/functional/caching_test.cpp | 2710 ++++++++--------- .../mocks/mock_engine/mock_plugin.cpp | 22 - 2 files changed, 1260 insertions(+), 1472 deletions(-) diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index dda1a727b0acac..fb3cd83b61cd0d 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -14,38 +14,37 @@ #include #include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_constants.hpp" -#include "common_test_utils/unicode_utils.hpp" -#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "ie_core.hpp" -#include "ie_metric_helpers.hpp" -#include "ie_remote_context.hpp" -#include "ngraph/function.hpp" +#include "ie_plugin_config.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "openvino/core/model.hpp" +#include "openvino/core/any.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/layout.hpp" #include "openvino/op/logical_not.hpp" +#include "openvino/op/parameter.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" -#include 
"openvino/util/file_util.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" -#include "unit_test_utils/mocks/mock_iexecutable_network.hpp" -#include "unit_test_utils/mocks/mock_iinfer_request.hpp" +#include "openvino/runtime/common.hpp" +#include "openvino/runtime/compiled_model.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/icompiled_model.hpp" +#include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/iremote_context.hpp" +#include "openvino/runtime/properties.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" -using namespace InferenceEngine; using namespace ::testing; -using namespace InferenceEngine::details; using namespace std::placeholders; using namespace std::chrono; -enum class TestLoadType { ECNN, EContext, EModelName }; +enum class TestLoadType { EModel, EContext, EModelName }; using TestParam = std::tuple; // GCC4.8 limitation: have to specify type of each element in list static const std::vector loadVariants = { - TestParam{TestLoadType::ECNN, std::string("ByCNNNetwork"), false}, + TestParam{TestLoadType::EModel, std::string("ByModel"), false}, TestParam{TestLoadType::EContext, std::string("ByRemoteContext"), true}, TestParam{TestLoadType::EModelName, std::string("ByModelName"), false}, }; @@ -54,103 +53,58 @@ static const std::vector cacheFolders{ std::string("testCache"), }; -class MockRemoteContext : public RemoteContext { +class MockRemoteContext : public ov::IRemoteContext { std::string m_name; public: MockRemoteContext(std::string name) : m_name(std::move(name)) {} - std::string getDeviceName() const noexcept override { + const std::string& get_device_name() const override { return m_name; } - MOCK_METHOD2(CreateBlob, RemoteBlob::Ptr(const TensorDesc&, const ParamMap&)); - MOCK_CONST_METHOD0(getParams, ParamMap()); + MOCK_METHOD(ov::SoPtr, + create_tensor, + (const ov::element::Type&, const ov::Shape&, const ov::AnyMap&)); + MOCK_METHOD(const ov::AnyMap&, get_property, (), (const)); }; -class MockCachingInferencePluginBase : public InferenceEngine::IInferencePlugin { +class MockCachingIPluginBase : public ov::MockIPlugin { public: - MockCachingInferencePluginBase() = default; - ~MockCachingInferencePluginBase() = default; + MockCachingIPluginBase() = default; + ~MockCachingIPluginBase() = default; - ov::SoPtr LoadNetwork(const std::string& modelPath, - const std::map& config) override { + std::shared_ptr compile_model(const std::string& model_path, + const ov::AnyMap& config) const override { // In GTEST, it is not possible to call base implementation inside of mocked class // Thus, we define a proxy callback and will use - // EXPECT_CALL(OnLoadNetworkFromFile) instead of EXPECT_CALL(LoadNetwork) - OnLoadNetworkFromFile(); - return InferenceEngine::IInferencePlugin::LoadNetwork(modelPath, config); + // EXPECT_CALL(OnCompileModelFromFile) instead of EXPECT_CALL(compile_model) + OnCompileModelFromFile(); + return ov::IPlugin::compile_model(model_path, config); } - virtual void OnLoadNetworkFromFile() const {} + virtual void OnCompileModelFromFile() const {} }; -class MockCachingInferencePlugin : public MockCachingInferencePluginBase { +class MockCachingIPlugin : public MockCachingIPluginBase { public: - MockCachingInferencePlugin() = default; - ~MockCachingInferencePlugin() = default; + MockCachingIPlugin() = 
default; + ~MockCachingIPlugin() = default; - MOCK_METHOD2(LoadExeNetworkImpl, - std::shared_ptr(const CNNNetwork& network, - const std::map& config)); - - MOCK_METHOD3(LoadExeNetworkImpl, - std::shared_ptr(const CNNNetwork& network, - const RemoteContext::Ptr& context, - const std::map& config)); - - MOCK_CONST_METHOD0(OnLoadNetworkFromFile, void(void)); - - MOCK_METHOD2(ImportNetwork, - IExecutableNetworkInternal::Ptr(std::istream& networkModel, - const std::map& config)); - - MOCK_METHOD3(ImportNetwork, - IExecutableNetworkInternal::Ptr(std::istream& networkModel, - const RemoteContext::Ptr& context, - const std::map& config)); - - MOCK_CONST_METHOD2(QueryNetwork, - QueryNetworkResult(const CNNNetwork& network, const std::map& config)); - - MOCK_CONST_METHOD2(GetMetric, Parameter(const std::string& name, const std::map& options)); - MOCK_METHOD1(SetConfig, void(const std::map& options)); - MOCK_METHOD1(GetDefaultContext, std::shared_ptr(const ParamMap& params)); + MOCK_METHOD(void, OnCompileModelFromFile, (), (const)); }; -class MockExecutableNetwork : public IExecutableNetworkInternal { +class MockICompiledModelImpl : public ov::MockICompiledModel { std::mutex m_pluginMutex; - std::shared_ptr m_model = nullptr; + std::shared_ptr m_model = nullptr; public: - MockExecutableNetwork() {} - - MOCK_METHOD1(Export, void(std::ostream& networkModel)); - MOCK_METHOD0(CreateInferRequest, IInferRequestInternal::Ptr()); - MOCK_CONST_METHOD0(GetInputsInfo, ConstInputsDataMap()); - MOCK_CONST_METHOD0(GetOutputsInfo, ConstOutputsDataMap()); - MOCK_CONST_METHOD0(getInputs, const std::vector>&()); - MOCK_CONST_METHOD0(getOutputs, const std::vector>&()); - MOCK_CONST_METHOD1(GetConfig, Parameter(const std::string& name)); - MOCK_CONST_METHOD1(GetMetric, Parameter(const std::string& name)); - MOCK_METHOD2(CreateInferRequestImpl, IInferRequestInternal::Ptr(InputsDataMap, OutputsDataMap)); - MOCK_METHOD1(setNetworkInputs, void(const InputsDataMap& networkInputs)); - MOCK_METHOD1(setNetworkOutputs, void(const OutputsDataMap& networkOutputs)); - MOCK_METHOD0(GetExecGraphInfo, std::shared_ptr()); - - // void Export(std::ostream& networkModel) override { - // std::lock_guard guard(m_pluginMutex); - // IExecutableNetworkInternal::Export(networkModel); - // } - - void set_model(const std::shared_ptr& model) { - m_model = model->clone(); - } - const std::shared_ptr& get_model() const { - return m_model; + MockICompiledModelImpl(const std::shared_ptr& model, + const std::shared_ptr& plugin) + : ov::MockICompiledModel(model, plugin) { + m_model = model; } - void SetPointerToPlugin(const IInferencePlugin::Ptr& plugin) override { - std::lock_guard guard(m_pluginMutex); - IExecutableNetworkInternal::SetPointerToPlugin(plugin); + const std::shared_ptr& get_model() const { + return m_model; } }; @@ -179,32 +133,30 @@ class MkDirGuard { class CachingTest : public ::testing::TestWithParam> { public: std::shared_ptr sharedObjectLoader; - std::function injectProxyEngine; + std::function injectPlugin; std::string modelName = "Caching_test.xml"; std::string weightsName = "Caching_test.bin"; std::string deviceName = "mock"; std::string deviceToLoad = "mock"; - std::shared_ptr mockPlugin; - std::vector> networks; + std::shared_ptr mockPlugin; + std::vector> comp_models; std::mutex mock_creation_mutex; // Internal gmock object registration is not thread-safe - using ExeNetCallback = std::function; + using ExeNetCallback = std::function; std::vector m_post_mock_net_callbacks = {}; std::unique_ptr m_dirCreator; - TestLoadType 
m_type = TestLoadType::ECNN; + TestLoadType m_type = TestLoadType::EModel; std::string m_cacheDir; - using LoadFunction = std::function; - using LoadFunctionWithCfg = std::function&)>; + using LoadFunction = std::function; + using LoadFunctionWithCfg = std::function; LoadFunction m_testFunction; LoadFunctionWithCfg m_testFunctionWithCfg; + using ModelCallbackFunc = std::function&)>; + ModelCallbackFunc m_modelCallback; bool m_remoteContext = false; - using CNNCallback = std::function; - CNNCallback m_cnnCallback = nullptr; - std::map m_inputs_map; - std::map m_outputs_map; - std::map>> m_inputs; - std::map>> m_outputs; - using CheckConfigCb = std::function&)>; + using CheckConfigCb = std::function; CheckConfigCb m_checkConfigCb = nullptr; + std::shared_ptr m_model; + std::map> m_models; static std::string get_mock_engine_path() { std::string mockEngineName("mock_engine"); @@ -225,63 +177,31 @@ class CachingTest : public ::testing::TestWithParam(new MkDirGuard(m_cacheDir)); } - static std::shared_ptr createMockIExecutableNet( - const std::string& name, - const InputsDataMap& inputs_map, - const OutputsDataMap& outputs_map, - const std::vector>& inputs, - const std::vector>& outputs) { - auto mock = std::make_shared(); - ConstInputsDataMap inputMap; - for (const auto& input_item : inputs_map) { - inputMap.insert({input_item.first, input_item.second}); - } - ConstOutputsDataMap outputMap; - for (const auto& output_item : outputs_map) { - outputMap.insert({output_item.first, output_item.second}); - } - EXPECT_CALL(*mock, GetInputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(inputMap)); - EXPECT_CALL(*mock, GetOutputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(outputMap)); - EXPECT_CALL(*mock, getInputs()).Times(AnyNumber()).WillRepeatedly(ReturnRef(inputs)); - EXPECT_CALL(*mock, getOutputs()).Times(AnyNumber()).WillRepeatedly(ReturnRef(outputs)); - EXPECT_CALL(*mock, GetConfig(ov::enable_profiling.name())) - .Times(AnyNumber()) - .WillRepeatedly(Return(Parameter{PluginConfigParams::NO})); - EXPECT_CALL(*mock, GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))) + static std::shared_ptr create_mock_compiled_model( + const std::shared_ptr& model, + const std::shared_ptr& plugin) { + auto mock = std::make_shared(model, plugin); + EXPECT_CALL(*mock, inputs()).Times(AnyNumber()).WillRepeatedly(ReturnRefOfCopy(model->inputs())); + EXPECT_CALL(*mock, outputs()).Times(AnyNumber()).WillRepeatedly(ReturnRefOfCopy(model->outputs())); + EXPECT_CALL(*mock, get_runtime_model()).Times(AnyNumber()).WillRepeatedly(Return(model)); + auto ptr = std::make_shared(); + EXPECT_CALL(*ptr, set_callback(_)).Times(AnyNumber()); + EXPECT_CALL(*mock, create_infer_request()).Times(AnyNumber()).WillRepeatedly(Return(ptr)); + + EXPECT_CALL(*mock, get_property(ov::enable_profiling.name())) .Times(AnyNumber()) - .WillRepeatedly(Return(Parameter{1u})); - EXPECT_CALL(*mock, GetExecGraphInfo()).Times(AnyNumber()).WillRepeatedly(Return([] { - ngraph::ParameterVector parameters; - parameters.push_back(std::make_shared(ov::element::f32, ov::Shape{1, 3, 8, 8})); - auto notOp = std::make_shared(parameters.back()); - ngraph::ResultVector results; - results.push_back(std::make_shared(notOp)); - return std::make_shared(results, parameters, "empty_function"); - }())); - auto ptr = std::make_shared(); - EXPECT_CALL(*ptr, SetCallback(_)).Times(AnyNumber()); - EXPECT_CALL(*mock, CreateInferRequest()).Times(AnyNumber()).WillRepeatedly(Return(ptr)); - - EXPECT_CALL(*mock, 
GetMetric(METRIC_KEY(NETWORK_NAME))).Times(AnyNumber()).WillRepeatedly(Return("mock_net")); - EXPECT_CALL(*mock, GetMetric(METRIC_KEY(SUPPORTED_METRICS))) + .WillRepeatedly(Return(ov::Any{false})); + EXPECT_CALL(*mock, get_property(ov::optimal_number_of_infer_requests.name())) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&) { - std::vector res; - res.emplace_back(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); - res.emplace_back(METRIC_KEY(NETWORK_NAME)); - return res; - })); - EXPECT_CALL(*mock, GetMetric(ov::supported_properties.name())) + .WillRepeatedly(Return(ov::Any{1u})); + EXPECT_CALL(*mock, get_property(ov::model_name.name())).Times(AnyNumber()).WillRepeatedly(Return("mock_net")); + EXPECT_CALL(*mock, get_property(ov::supported_properties.name())) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::optimal_number_of_infer_requests.name(), ov::model_name.name()})); - EXPECT_CALL(*mock, setNetworkInputs(_)).Times(AnyNumber()); - EXPECT_CALL(*mock, setNetworkOutputs(_)).Times(AnyNumber()); - mock->setNetworkInputs(copyInfo(inputs_map)); - mock->setNetworkOutputs(copyInfo(outputs_map)); - ON_CALL(*mock, Export(_)).WillByDefault(Invoke([name](std::ostream& s) { - s << name; + ON_CALL(*mock, export_model(_)).WillByDefault(Invoke([model](std::ostream& s) { + s << model->get_friendly_name(); s << ' '; })); return mock; @@ -289,11 +209,11 @@ class CachingTest : public ::testing::TestWithParam(); + mockPlugin = std::make_shared(); setupMock(*mockPlugin); std::string libraryPath = get_mock_engine_path(); sharedObjectLoader = ov::util::load_shared_object(libraryPath.c_str()); - injectProxyEngine = make_std_function("InjectProxyEngine"); + injectPlugin = make_std_function("InjectPlugin"); ov::pass::Manager manager; manager.register_pass(modelName, weightsName); @@ -301,40 +221,36 @@ class CachingTest : public ::testing::TestWithParam& func) { - Core ie; - injectProxyEngine(mockPlugin.get()); - ie.RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + IE_BUILD_POSTFIX), - deviceName); - func(ie); - ie.UnregisterPlugin(deviceName); + void testLoad(const std::function& func) { + ov::Core core; + injectPlugin(mockPlugin.get()); + core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + std::string("mock_engine") + IE_BUILD_POSTFIX), + deviceName); + func(core); + core.unload_plugin(deviceName); } LoadFunction getLoadFunction(TestLoadType type) const { switch (type) { - case TestLoadType::ECNN: - return [&](Core& ie) { - return performReadAndLoad(ie); + case TestLoadType::EModel: + return [&](ov::Core& core) { + return performReadAndLoad(core); }; case TestLoadType::EContext: - return [&](Core& ie) { - return performReadAndLoadWithContext(ie); + return [&](ov::Core& core) { + return performReadAndLoadWithContext(core); }; case TestLoadType::EModelName: - return [&](Core& ie) { - return performLoadByName(ie); + return [&](ov::Core& core) { + return performLoadByName(core); }; } return nullptr; @@ -342,7 +258,7 @@ class CachingTest : public ::testing::TestWithParam& config = {}) const { - return ie.LoadNetwork(modelName, deviceToLoad, config); + ov::CompiledModel performLoadByName(ov::Core& core, const ov::AnyMap& config = {}) const { + return core.compile_model(modelName, deviceToLoad, config); } - ExecutableNetwork performReadAndLoad(Core& ie, const std::map& config = {}) const { - auto cnnNetwork = ie.ReadNetwork(modelName); - 
if (m_cnnCallback) - m_cnnCallback(cnnNetwork); - return ie.LoadNetwork(cnnNetwork, deviceToLoad, config); + ov::CompiledModel performReadAndLoad(ov::Core& core, const ov::AnyMap& config = {}) const { + auto model = core.read_model(modelName); + if (m_modelCallback) + m_modelCallback(model); + return core.compile_model(model, deviceToLoad, config); } - ExecutableNetwork performReadAndLoadWithContext(Core& ie, - const std::map& config = {}) const { - auto cnnNetwork = ie.ReadNetwork(modelName); - EXPECT_CALL(*mockPlugin, GetDefaultContext(_)).Times(AnyNumber()); - auto context = ie.GetDefaultContext(deviceToLoad); - if (m_cnnCallback) - m_cnnCallback(cnnNetwork); - return ie.LoadNetwork(cnnNetwork, context, config); + ov::CompiledModel performReadAndLoadWithContext(ov::Core& core, const ov::AnyMap& config = {}) const { + auto model = core.read_model(modelName); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + auto context = core.get_default_context(deviceToLoad); + if (m_modelCallback) + m_modelCallback(model); + return core.compile_model(model, context, config); } private: @@ -380,57 +295,42 @@ class CachingTest : public ::testing::TestWithParam&) { - std::vector res; - res.emplace_back(METRIC_KEY(IMPORT_EXPORT_SUPPORT)); - res.emplace_back(ov::internal::caching_properties.name()); - res.emplace_back(METRIC_KEY(DEVICE_ARCHITECTURE)); - return res; + void setupMock(MockCachingIPlugin& plugin) { + ON_CALL(plugin, get_property(_, _)) + .WillByDefault(Invoke([&](const std::string& name, const ov::AnyMap&) -> ov::Any { + OPENVINO_THROW("Unexpected ", name); })); - ON_CALL(plugin, GetMetric(ov::supported_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(plugin, get_property(ov::supported_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::supported_properties.name(), - METRIC_KEY(IMPORT_EXPORT_SUPPORT), ov::device::capabilities.name(), ov::device::architecture.name()}; })); - ON_CALL(plugin, GetMetric(ov::internal::supported_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(plugin, get_property(ov::internal::supported_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::internal::caching_properties.name()}; })); - ON_CALL(plugin, GetMetric(METRIC_KEY(OPTIMIZATION_CAPABILITIES), _)) - .WillByDefault(Return(std::vector())); + ON_CALL(plugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).WillByDefault(Return(true)); - ON_CALL(plugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).WillByDefault(Return(true)); - - ON_CALL(plugin, GetMetric(ov::device::capabilities.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(plugin, get_property(ov::device::capabilities.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { return decltype(ov::device::capabilities)::value_type{ov::device::capability::EXPORT_IMPORT}; })); - ON_CALL(plugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { - std::vector res; - res.emplace_back("SomeConfig"); - return res; - })); - - ON_CALL(plugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(plugin, get_property(ov::device::architecture.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { 
return "mock"; })); - ON_CALL(plugin, GetMetric(ov::internal::caching_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(plugin, get_property(ov::internal::caching_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { std::vector cachingProperties = {ov::device::architecture.name()}; return decltype(ov::internal::caching_properties)::value_type(cachingProperties); })); - ON_CALL(plugin, ImportNetwork(_, _, _)) - .WillByDefault(Invoke( - [&](std::istream& istr, const RemoteContext::Ptr&, const std::map& config) { + ON_CALL(plugin, import_model(_, _, _)) + .WillByDefault( + Invoke([&](std::istream& istr, const ov::SoPtr&, const ov::AnyMap& config) { if (m_checkConfigCb) { m_checkConfigCb(config); } @@ -439,723 +339,641 @@ class CachingTest : public ::testing::TestWithParam lock(mock_creation_mutex); - return createMockIExecutableNet({}, - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); + return create_mock_compiled_model(m_models[name], mockPlugin); })); - ON_CALL(plugin, ImportNetwork(_, _)) - .WillByDefault(Invoke([&](std::istream& istr, const std::map& config) { - if (m_checkConfigCb) { - m_checkConfigCb(config); - } - std::string name; - istr >> name; - char space; - istr.read(&space, 1); - std::lock_guard lock(mock_creation_mutex); - return createMockIExecutableNet({}, - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); - })); + ON_CALL(plugin, import_model(_, _)).WillByDefault(Invoke([&](std::istream& istr, const ov::AnyMap& config) { + if (m_checkConfigCb) { + m_checkConfigCb(config); + } + std::string name; + istr >> name; + char space; + istr.read(&space, 1); + std::lock_guard lock(mock_creation_mutex); + return create_mock_compiled_model(m_models[name], mockPlugin); + })); - ON_CALL(plugin, LoadExeNetworkImpl(_, _, _)) - .WillByDefault(Invoke([&](const CNNNetwork& cnn, - const RemoteContext::Ptr&, - const std::map& config) { + ON_CALL(plugin, compile_model(_, _, _)) + .WillByDefault(Invoke([&](const std::shared_ptr& model, + const ov::AnyMap& config, + const ov::SoPtr&) { if (m_checkConfigCb) { m_checkConfigCb(config); } std::lock_guard lock(mock_creation_mutex); - std::string name = cnn.getFunction()->get_friendly_name(); - m_inputs_map[name] = cnn.getInputsInfo(); - m_outputs_map[name] = cnn.getOutputsInfo(); - std::vector> inputs_, outputs_; - for (const auto& input : cnn.getFunction()->inputs()) - inputs_.emplace_back(input.get_node_shared_ptr()); - for (const auto& output : cnn.getFunction()->outputs()) - outputs_.emplace_back(output.get_node_shared_ptr()); - m_inputs[name] = inputs_; - m_outputs[name] = outputs_; - auto exe_net = createMockIExecutableNet(cnn.getFunction()->get_friendly_name(), - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); - exe_net->set_model(cnn.getFunction()); + m_models[model->get_friendly_name()] = model->clone(); + auto comp_model = create_mock_compiled_model(m_models[model->get_friendly_name()], mockPlugin); for (const auto& cb : m_post_mock_net_callbacks) { - cb(*exe_net); + cb(*comp_model); } - networks.push_back(exe_net); - return exe_net; + comp_models.push_back(comp_model); + return comp_model; })); - ON_CALL(plugin, LoadExeNetworkImpl(_, _)) - .WillByDefault(Invoke([&](const CNNNetwork& cnn, const std::map& config) { + ON_CALL(plugin, compile_model(A&>(), _)) + .WillByDefault(Invoke([&](const std::shared_ptr& model, const ov::AnyMap& config) { if 
(m_checkConfigCb) { m_checkConfigCb(config); } - std::string name = cnn.getFunction()->get_friendly_name(); std::lock_guard lock(mock_creation_mutex); - m_inputs_map[name] = cnn.getInputsInfo(); - m_outputs_map[name] = cnn.getOutputsInfo(); - cnn.getFunction()->inputs(); - std::vector> inputs_, outputs_; - for (const auto& input : cnn.getFunction()->inputs()) - inputs_.emplace_back(input.get_node_shared_ptr()); - for (const auto& output : cnn.getFunction()->outputs()) - outputs_.emplace_back(output.get_node_shared_ptr()); - m_inputs[name] = inputs_; - m_outputs[name] = outputs_; - auto exe_net = createMockIExecutableNet(cnn.getFunction()->get_friendly_name(), - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); - exe_net->set_model(cnn.getFunction()); + m_models[model->get_friendly_name()] = model->clone(); + auto comp_model = create_mock_compiled_model(m_models[model->get_friendly_name()], mockPlugin); for (const auto& cb : m_post_mock_net_callbacks) { - cb(*exe_net); + cb(*comp_model); } - networks.push_back(exe_net); - return exe_net; + comp_models.push_back(comp_model); + return comp_model; })); - ON_CALL(plugin, GetDefaultContext(_)).WillByDefault(Invoke([&](const ParamMap&) { + ON_CALL(plugin, get_default_context(_)).WillByDefault(Invoke([&](const ov::AnyMap&) { return std::make_shared(deviceToLoad); })); - ON_CALL(plugin, QueryNetwork(_, _)) - .WillByDefault(Invoke([&](const CNNNetwork& network, const std::map&) { - QueryNetworkResult res; - auto function = network.getFunction(); - EXPECT_TRUE(function); + ON_CALL(plugin, query_model(_, _)) + .WillByDefault(Invoke([&](const std::shared_ptr& model, const ov::AnyMap&) { + ov::SupportedOpsMap res; + EXPECT_TRUE(model); - for (auto&& node : function->get_ops()) { - res.supportedLayersMap.emplace(node->get_friendly_name(), deviceName); + for (auto&& node : model->get_ops()) { + res.emplace(node->get_friendly_name(), deviceName); } return res; })); - EXPECT_CALL(plugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map&) { - throw InferenceEngine::NotImplemented("Not implemented"); - })); + EXPECT_CALL(plugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap&) { + OPENVINO_NOT_IMPLEMENTED; + })); } }; TEST_P(CachingTest, TestLoad) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, 
get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); // No more 'Export' for existing networks + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
1 : 0); + for (auto& model : comp_models) { + EXPECT_CALL(*model, export_model(_)).Times(0); // No more 'export_model' for existing model } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } } -/// \brief Verifies that ie.SetConfig({{"CACHE_DIR", }}, "deviceName"}}); enables caching for one device +/// \brief Verifies that core.set_property({{"CACHE_DIR", }}, "deviceName"}}); enables caching for one device TEST_P(CachingTest, TestLoad_by_device_name) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 
1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); // No more 'Export' for existing networks + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + for (auto& model : comp_models) { + EXPECT_CALL(*model, export_model(_)).Times(0); // No more 'export_model' for existing models } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } } TEST_P(CachingTest, TestLoadCustomImportExport) { const char customData[] = {1, 2, 3, 4, 5}; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - ON_CALL(*mockPlugin, ImportNetwork(_, _, _)) - .WillByDefault( - Invoke([&](std::istream& s, const RemoteContext::Ptr&, const std::map&) { - char a[sizeof(customData)]; - s.read(a, sizeof(customData)); - EXPECT_EQ(memcmp(a, customData, sizeof(customData)), 0); - std::string name; - s >> name; - std::lock_guard lock(mock_creation_mutex); - return createMockIExecutableNet({}, - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); - })); - - ON_CALL(*mockPlugin, ImportNetwork(_, _)) - .WillByDefault(Invoke([&](std::istream& s, const std::map&) { + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + + ON_CALL(*mockPlugin, import_model(_, _, _)) + .WillByDefault(Invoke([&](std::istream& s, const ov::SoPtr&, const ov::AnyMap&) { char a[sizeof(customData)]; s.read(a, sizeof(customData)); EXPECT_EQ(memcmp(a, customData, sizeof(customData)), 0); std::string name; s >> name; std::lock_guard lock(mock_creation_mutex); - return createMockIExecutableNet({}, - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); + return create_mock_compiled_model(m_models[name], mockPlugin); })); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - ON_CALL(net, Export(_)).WillByDefault(Invoke([&](std::ostream& s) { + ON_CALL(*mockPlugin, import_model(_, _)).WillByDefault(Invoke([&](std::istream& s, const 
ov::AnyMap&) { + char a[sizeof(customData)]; + s.read(a, sizeof(customData)); + EXPECT_EQ(memcmp(a, customData, sizeof(customData)), 0); + std::string name; + s >> name; + std::lock_guard lock(mock_creation_mutex); + return create_mock_compiled_model(m_models[name], mockPlugin); + })); + + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + ON_CALL(net, export_model(_)).WillByDefault(Invoke([&](std::ostream& s) { s.write(customData, sizeof(customData)); s << net.get_model()->get_friendly_name(); })); }); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); // No 'Export' for existing networks + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
1 : 0); + for (auto& model : comp_models) { + EXPECT_CALL(*model, export_model(_)).Times(0); // No 'export_model' for existing models } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } -// Brief: when LoadNetwork is called from different config - old cache shall not be used +// Brief: when compile_model is called from different config - old cache shall not be used TEST_P(CachingTest, TestChangeLoadConfig) { const std::string CUSTOM_KEY = "CUSTOM_KEY"; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - ON_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + + ON_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::supported_properties.name(), - METRIC_KEY(IMPORT_EXPORT_SUPPORT), ov::device::capabilities.name(), ov::device::architecture.name()}; })); - ON_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::internal::caching_properties.name()}; })); - ON_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { + ON_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { std::vector res; res.push_back(ov::PropertyName(CUSTOM_KEY, ov::PropertyMutability::RO)); return decltype(ov::internal::caching_properties)::value_type(res); })); - ON_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { - std::vector res; - res.push_back(ov::internal::caching_properties.name()); - return res; - })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 
1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunctionWithCfg(ie, {{CUSTOM_KEY, "0"}}); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunctionWithCfg(core, {{CUSTOM_KEY, "0"}}); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunctionWithCfg(ie, {{CUSTOM_KEY, "1"}}); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunctionWithCfg(core, {{CUSTOM_KEY, "1"}}); }); } } -/// \brief Verifies that ie.LoadNetwork(cnn, "deviceName", {{"CACHE_DIR", >}}) works +/// \brief Verifies that core.compile_model(model, "deviceName", {{"CACHE_DIR", >}}) works TEST_P(CachingTest, TestChangeLoadConfig_With_Cache_Dir_inline) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - ON_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)) - .WillByDefault(Invoke([&](const std::string&, const std::map&) { - return std::vector{}; - })); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + 
EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); + testLoad([&](ov::Core& core) { + m_testFunctionWithCfg(core, ov::AnyMap{{ov::cache_dir.name(), m_cacheDir}}); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); // No more 'Export' for existing networks + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
1 : 0); + for (auto& model : comp_models) { + EXPECT_CALL(*model, export_model(_)).Times(0); // No more 'export_model' for existing models } - testLoad([&](Core& ie) { - m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); + testLoad([&](ov::Core& core) { + m_testFunctionWithCfg(core, ov::AnyMap{{ov::cache_dir.name(), m_cacheDir}}); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } } TEST_P(CachingTest, TestNoCacheEnabled) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - testLoad([&](Core& ie) { - m_testFunction(ie); + testLoad([&](ov::Core& core) { + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoCacheSupported) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)) .Times(AnyNumber()) .WillRepeatedly(Return(false)); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoCacheMetricSupported) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(0); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } /// \brief If device doesn't support 'cache_dir' or 'import_export' - setting cache_dir is ignored TEST_P(CachingTest, TestNoCacheMetricSupported_by_device_name) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(0); + { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_GT(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AtLeast(1)).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_GT(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
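        // Since the mock plugin lists ov::cache_dir among its supported_properties, the
        // set_property expectation above requires the directory to reach the plugin; the
        // loads below are then expected to go through the regular compile_model path.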
1 : 0); + ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); })); } } -/// \brief If device supports 'cache_dir' or 'import_export' - setting cache_dir is passed to plugin on ie.LoadNetwork +/// \brief If device supports 'cache_dir' or 'import_export' - setting cache_dir is passed to plugin on +/// core.compile_model TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig_inline) { - m_checkConfigCb = [](const std::map& config) { - EXPECT_NE(config.count(CONFIG_KEY(CACHE_DIR)), 0); + m_checkConfigCb = [](const ov::AnyMap& config) { + EXPECT_NE(config.count(ov::cache_dir.name()), 0); }; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + m_testFunctionWithCfg(core, {{ov::cache_dir.name(), m_cacheDir}}); })); } } -/// \brief ie.SetConfig(, "deviceName") is propagated to plugin's SetConfig if device supports CACHE_DIR +/// \brief core.set_property(, "deviceName") is propagated to plugin's set_property if device supports +/// CACHE_DIR TEST_P(CachingTest, TestNoCacheMetric_hasCacheDirConfig_by_device_name) { - m_checkConfigCb = [](const std::map& config) { - // Shall be '0' as appropriate 'cache_dir' is expected in SetConfig, not in Load/Import network - EXPECT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); + m_checkConfigCb = [](const ov::AnyMap& config) { + // Shall be '0' as appropriate 'cache_dir' is expected in set_property, not in Load/Import model + EXPECT_EQ(config.count(ov::cache_dir.name()), 0); }; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_GT(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AtLeast(1)).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_GT(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); })); } } TEST_P(CachingTest, TestCacheEnabled_noConfig) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_EQ(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
1 : 0); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoCacheMetric_configThrow) { - m_checkConfigCb = [](const std::map& config) { - EXPECT_NE(config.count(CONFIG_KEY(CACHE_DIR)), 0); + m_checkConfigCb = [](const ov::AnyMap& config) { + EXPECT_NE(config.count(ov::cache_dir.name()), 0); }; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AtLeast(1)) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_GT(config.count(CONFIG_KEY(CACHE_DIR)), 0); - throw InferenceEngine::GeneralError("Error occurred"); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AtLeast(1)).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_GT(config.count(ov::cache_dir.name()), 0); + OPENVINO_THROW("Error occurred"); + })); - ASSERT_ANY_THROW(testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + ASSERT_ANY_THROW(testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); })); } TEST_P(CachingTest, TestNoCacheEnabled_cacheDirConfig) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AtLeast(1)) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_EQ(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_testFunction(core); }); } } TEST_P(CachingTest, TestLoadChangeCacheDir) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { std::string newCacheDir = m_cacheDir + "2"; MkDirGuard dir(newCacheDir); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
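        // Editor's sketch (not part of this patch) of the user-level flow this test mirrors,
        // assuming a hypothetical model.xml and the fixture's "mock" device:
        //   ov::Core core;
        //   core.set_property(ov::cache_dir(m_cacheDir));   // first compile_model() also export_model()s a blob
        //   core.compile_model("model.xml", "mock");
        //   core.set_property(ov::cache_dir(newCacheDir));  // a new directory starts an empty cache,
        //   core.compile_model("model.xml", "mock");        // so the model is compiled and exported again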
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(newCacheDir)); + m_testFunction(core); }); } } /// \brief Change CACHE_DIR during working with same 'Core' object. Verifies that new dir is used for caching TEST_P(CachingTest, TestLoadChangeCacheDirOneCore) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_EQ(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); std::string newCacheDir = m_cacheDir + "2"; m_post_mock_net_callbacks.pop_back(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); MkDirGuard dir(newCacheDir); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(newCacheDir)); + m_testFunction(core); }); } } @@ -1163,85 +981,79 @@ TEST_P(CachingTest, TestLoadChangeCacheDirOneCore) { /// \brief Change CACHE_DIR during working with same 'Core' object /// Initially set for 'device', then is overwritten with global 'cache_dir' for all devices TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_overwrite_device_dir) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_EQ(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); std::string newCacheDir = m_cacheDir + "2"; m_post_mock_net_callbacks.pop_back(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); MkDirGuard dir(newCacheDir); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir}}); - m_testFunction(ie); + core.set_property({ov::cache_dir(newCacheDir)}); + m_testFunction(core); }); } } /// \brief Change CACHE_DIR during working with same 'Core' object for device which supports 'CACHE_DIR' config, not -/// import_export Expectation is that SetConfig for plugin will be called 2 times - with appropriate cache_dir values +/// import_export Expectation is that set_property for plugin will be called 2 times - with appropriate cache_dir values TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_SupportsCacheDir_NoImportExport) { - m_checkConfigCb = [](const std::map& config) { - EXPECT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); + m_checkConfigCb = [](const ov::AnyMap& config) { + EXPECT_EQ(config.count(ov::cache_dir.name()), 0); }; - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{ov::supported_properties.name(), ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{METRIC_KEY(SUPPORTED_CONFIG_KEYS)})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); std::string set_cache_dir = {}; - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AtLeast(2)) - .WillRepeatedly(Invoke([&](const std::map& config) { - ASSERT_NE(config.count(CONFIG_KEY(CACHE_DIR)), 0); - set_cache_dir = config.at(CONFIG_KEY(CACHE_DIR)); - })); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AtLeast(2)).WillRepeatedly(Invoke([&](const ov::AnyMap& config) { + ASSERT_NE(config.count(ov::cache_dir.name()), 0); + set_cache_dir = 
config.at(ov::cache_dir.name()).as(); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); - }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); + }); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); EXPECT_EQ(set_cache_dir, m_cacheDir); std::string new_cache_dir = m_cacheDir + "2"; MkDirGuard dir(new_cache_dir); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), new_cache_dir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(new_cache_dir)); + m_testFunction(core); EXPECT_EQ(set_cache_dir, new_cache_dir); }); } @@ -1249,37 +1061,35 @@ TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_SupportsCacheDir_NoImportExpor /// \brief Change CACHE_DIR per device during working with same 'Core' object - expected that new cache dir is used TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_by_device_name) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_EQ(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, set_property(_)).Times(AnyNumber()).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_EQ(config.count(ov::cache_dir.name()), 0); + })); { - 
EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); m_post_mock_net_callbacks.pop_back(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); std::string newCacheDir = m_cacheDir + "2"; MkDirGuard dir(newCacheDir); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir}}, "mock"); - m_testFunction(ie); + core.set_property("mock", ov::cache_dir(newCacheDir)); + m_testFunction(core); }); } } @@ -1287,134 +1097,132 @@ TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_by_device_name) { /// \brief Change CACHE_DIR per device during working with same 'Core' object - device supports CACHE_DIR /// Verifies that no 'export' is called and cache_dir is propagated to set_config TEST_P(CachingTest, TestLoadChangeCacheDirOneCore_by_device_name_supports_cache_dir) { - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{ov::cache_dir.name()})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)) .Times(AnyNumber()) .WillRepeatedly(Return(false)); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, SetConfig(_)) - .Times(AtLeast(2)) - .WillRepeatedly(Invoke([](const std::map& config) { - ASSERT_GT(config.count(CONFIG_KEY(CACHE_DIR)), 0); - })); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, 
set_property(_)).Times(AtLeast(2)).WillRepeatedly(Invoke([](const ov::AnyMap& config) { + ASSERT_GT(config.count(ov::cache_dir.name()), 0); + })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}, "mock"); - m_testFunction(ie); + core.set_property("mock", ov::cache_dir(m_cacheDir)); + m_testFunction(core); m_post_mock_net_callbacks.pop_back(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); std::string newCacheDir = m_cacheDir + "2"; MkDirGuard dir(newCacheDir); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir}}, "mock"); - m_testFunction(ie); + core.set_property("mock", ov::cache_dir(newCacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestClearCacheDir) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 
1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + for (auto& model : comp_models) { + EXPECT_CALL(*model, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), ""}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + core.set_property(ov::cache_dir("")); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } } TEST_P(CachingTest, TestChangeOtherConfig) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
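        // Exactly one compile_model overload is expected to fire per run: the remote-context
        // overload when m_remoteContext is set, otherwise the overload taking a model pointer,
        // which gmock's A<>() type matcher pins down here.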
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ie.SetConfig({{"someKey", "someValue"}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + core.set_property({{"someKey", "someValue"}}); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } } TEST_P(CachingTest, TestChangeCacheDirFailure) { std::string longName(1000000, ' '); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); - EXPECT_EQ(networks.size(), 1); + EXPECT_EQ(comp_models.size(), 1); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 
1 : 0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - EXPECT_ANY_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir + "/" + longName}})); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + EXPECT_ANY_THROW(core.set_property(ov::cache_dir(m_cacheDir + "/" + longName))); + m_testFunction(core); }); } } @@ -1424,24 +1232,24 @@ TEST_P(CachingTest, TestCacheDirCreateRecursive) { std::string newCacheDir2 = newCacheDir1 + ov::test::utils::FileSeparator + "b"; std::string newCacheDir3 = newCacheDir2 + ov::test::utils::FileSeparator + ov::test::utils::FileSeparator; - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), newCacheDir3}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(newCacheDir3))); + EXPECT_NO_THROW(m_testFunction(core)); }); } ov::test::utils::removeFilesWithExt(newCacheDir2, "blob"); @@ -1450,15 +1258,14 @@ TEST_P(CachingTest, TestCacheDirCreateRecursive) { } TEST_P(CachingTest, TestDeviceArchitecture) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map& options) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap& options) { auto id = options.at("DEVICE_ID").as(); if (std::stoi(id) < 10) { return "mock_first_architecture"; @@ -1467,182 +1274,174 @@ TEST_P(CachingTest, TestDeviceArchitecture) { } })); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); - }); - testLoad([&](Core& ie) { + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + testLoad([&](ov::Core& core) { deviceToLoad = "mock.0"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { + testLoad([&](ov::Core& core) { deviceToLoad = "mock.1"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); - }); - testLoad([&](Core& ie) { + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + testLoad([&](ov::Core& core) { deviceToLoad = "mock.50"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
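        // mock.51 reports the same "mock_another_architecture" as mock.50 (ids >= 10 above),
        // so the blob exported by the previous block is imported here instead of recompiling.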
1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { + testLoad([&](ov::Core& core) { deviceToLoad = "mock.51"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoDeviceArchitecture) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::device::capabilities.name()}; })); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::internal::caching_properties.name()}; })); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::supported_properties}; })); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { - return std::vector{METRIC_KEY(IMPORT_EXPORT_SUPPORT)}; - })); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{ov::device::capability::EXPORT_IMPORT})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(0); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); - }); - testLoad([&](Core& ie) { + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
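        // With EXPORT_IMPORT capability but no ov::device::architecture property, the cache key
        // cannot tell mock.0 from mock.50; this first load compiles and exports once, and the
        // second block below imports that same blob for mock.50.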
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + testLoad([&](ov::Core& core) { deviceToLoad = "mock.0"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { + testLoad([&](ov::Core& core) { deviceToLoad = "mock.50"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestNoCachingProperties) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{ov::device::capabilities.name()}; })); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap&) { return std::vector{}; })); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(0); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map&) { - return std::vector{METRIC_KEY(IMPORT_EXPORT_SUPPORT)}; - })); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{ov::device::capability::EXPORT_IMPORT})); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(0); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(0); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 
1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); - }); - testLoad([&](Core& ie) { + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); + }); + testLoad([&](ov::Core& core) { deviceToLoad = "mock.0"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestThrowOnExport) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1).WillOnce(Throw(1)); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
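// Scenario note: the single load compiles successfully, but export_model is mocked to throw;
// the failure is expected to propagate to the caller (EXPECT_ANY_THROW below).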
1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1).WillOnce(Throw(1)); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - EXPECT_ANY_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + EXPECT_ANY_THROW(m_testFunction(core)); }); } } @@ -1650,79 +1449,80 @@ TEST_P(CachingTest, TestThrowOnExport) { // TODO: temporary behavior is to no re-throw exception on import error (see 54335) // In future add separate 'no throw' test for 'blob_outdated' exception from plugin TEST_P(CachingTest, TestThrowOnImport) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 
1 : 0); if (m_remoteContext) { - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(1).WillOnce(Throw(1)); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(1).WillOnce(Throw(1)); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); } else { - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(1).WillOnce(Throw(1)); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1).WillOnce(Throw(1)); } - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + EXPECT_NO_THROW(m_testFunction(core)); }); } { // Step 3: same load, cache is re-created on export on step 2 and shall be successfully imported now - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
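// Scenario note, step 2: a cached blob exists but import_model is mocked to throw. Per the TODO
// above, the error is swallowed: the model is recompiled, the blob is re-exported once, and the
// load itself must not throw.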
1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + EXPECT_NO_THROW(m_testFunction(core)); }); } } -TEST_P(CachingTest, TestNetworkModified) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); +TEST_P(CachingTest, TestModelModified) { + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } if (m_type == TestLoadType::EModelName) { @@ -1730,61 +1530,62 @@ TEST_P(CachingTest, TestNetworkModified) { std::fstream stream(modelName, std::fstream::out | std::fstream::app); stream << " "; } else { - // Modify loaded CNN network - m_cnnCallback = [&](CNNNetwork& network) { - network.getInputsInfo()["Param_1"]->setLayout(Layout::NHWC); + // Modify loaded ov::Model + m_modelCallback = [&](std::shared_ptr& model) { + model->get_parameters()[0]->set_layout(ov::Layout("NHWC")); }; } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 
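// Scenario note, step 2: the IR file (EModelName variant) or the in-memory ov::Model has just been
// altered, so its hash no longer matches the cached blob; this load recompiles and re-exports
// instead of importing.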
1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { // Step 3: same load, should be ok now - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } TEST_P(CachingTest, TestCacheFileCorrupted) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 
1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } { @@ -1796,53 +1597,54 @@ TEST_P(CachingTest, TestCacheFileCorrupted) { } m_post_mock_net_callbacks.pop_back(); { // Step 2. Cache is corrupted, will be silently removed - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } { // Step 3: same load, should be ok now due to re-creation of cache - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 
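// Scenario note, step 3: the blob rewritten during step 2 is valid again, so this load is
// satisfied entirely by import_model, with no further compilation or export.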
1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } } TEST_P(CachingTest, TestCacheFileOldVersion) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)).Times(AnyNumber()); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } { @@ -1855,7 +1657,7 @@ TEST_P(CachingTest, TestCacheFileOldVersion) { ostr << inp.rdbuf(); content = ostr.str(); } - std::string buildNum = GetInferenceEngineVersion()->buildNumber; + std::string buildNum = ov::get_openvino_version().buildNumber; std::string zeroBuild(buildNum.size(), '0'); auto index = content.find(buildNum); if (index != std::string::npos) { @@ -1869,46 +1671,41 @@ TEST_P(CachingTest, TestCacheFileOldVersion) { } m_post_mock_net_callbacks.pop_back(); { // Step 2. Build number mismatch, cache will be silently removed - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 
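// Scenario note, step 2: the blob on disk now carries a zeroed build number, so the version check
// rejects it; the stale entry is silently dropped, the model is recompiled and a fresh blob is
// exported.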
1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } m_post_mock_net_callbacks.pop_back(); { // Step 3: same load, should be ok now due to re-creation of cache - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(m_remoteContext ? 1 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(!m_remoteContext ? 1 : 0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(!m_remoteContext ? 1 : 0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - EXPECT_NO_THROW(ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}})); - EXPECT_NO_THROW(m_testFunction(ie)); + testLoad([&](ov::Core& core) { + EXPECT_NO_THROW(core.set_property(ov::cache_dir(m_cacheDir))); + EXPECT_NO_THROW(m_testFunction(core)); }); } } TEST_P(CachingTest, LoadHetero_NoCacheMetric) { - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(SUPPORTED_METRICS), _)) - .Times(AnyNumber()) - .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::supported_properties.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(std::vector{})); // Hetero supports Import/Export, but mock plugin does not @@ -1917,127 +1714,128 @@ TEST_P(CachingTest, LoadHetero_NoCacheMetric) { return; // skip the remote Context test for Hetero plugin } for (int i = 0; i < 2; i++) { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); - 
EXPECT_CALL(net, GetExecGraphInfo()).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); + EXPECT_CALL(net, get_runtime_model()).Times(0); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); + comp_models.clear(); }); } } TEST_P(CachingTest, LoadHetero_OneDevice) { - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + // deviceToLoad = "mock"; deviceToLoad = ov::test::utils::DEVICE_HETERO + std::string(":mock"); if (m_remoteContext) { return; // skip the remote Context test for Hetero plugin } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); // Ensure that only 1 blob (for Hetero) is created EXPECT_EQ(ov::test::utils::listFilesWithExt(m_cacheDir, "blob").size(), 1); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(1); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); + comp_models.clear(); }); } } TEST_P(CachingTest, LoadHetero_TargetFallbackFromCore) { - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); 
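// Illustrative aside, kept in comments and not part of the patch: the mocks in these tests encode
// the public ov 2.0 caching flow that replaced the InferenceEngine-era SetConfig/LoadNetwork calls.
// A minimal sketch of that flow; the model file "model.xml", device "CPU" and directory "ov_cache"
// are assumptions for illustration only:
//
//   #include <openvino/openvino.hpp>
//
//   int main() {
//       ov::Core core;
//       core.set_property(ov::cache_dir("ov_cache"));        // replaces SetConfig(CONFIG_KEY(CACHE_DIR), ...)
//       auto cold = core.compile_model("model.xml", "CPU");   // first run: plugin compile_model + export_model
//       auto warm = core.compile_model("model.xml", "CPU");   // second run: served through plugin import_model
//       return 0;
//   }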
deviceToLoad = ov::test::utils::DEVICE_HETERO; if (m_remoteContext) { return; // skip the remote Context test for Hetero plugin } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); - }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ie.SetConfig({{"TARGET_FALLBACK", "mock"}}, ov::test::utils::DEVICE_HETERO); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); + }); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + m_testFunction(core); }); // Ensure that only 1 blob (for Hetero) is created EXPECT_EQ(ov::test::utils::listFilesWithExt(m_cacheDir, "blob").size(), 1); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(1); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ie.SetConfig({{"TARGET_FALLBACK", "mock"}}, ov::test::utils::DEVICE_HETERO); - m_testFunction(ie); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock"}}); + m_testFunction(core); + comp_models.clear(); }); } } TEST_P(CachingTest, LoadHetero_MultiArchs) { - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)) + EXPECT_CALL(*mockPlugin, query_model(_, _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const CNNNetwork& network, const std::map& config) { - QueryNetworkResult res; - auto function = network.getFunction(); - EXPECT_TRUE(function); - - auto id = config.at("DEVICE_ID"); - bool supportsRelu = std::stoi(id) < 10; - - for (auto&& node : function->get_ops()) { - std::string nodeType = node->get_type_name(); - if ((nodeType == "Relu" && supportsRelu) || (nodeType != "Relu" && !supportsRelu)) { - res.supportedLayersMap.emplace(node->get_friendly_name(), deviceName + "." 
+ id); + .WillRepeatedly( + Invoke([&](const std::shared_ptr& model, const ov::AnyMap& config) -> ov::SupportedOpsMap { + ov::SupportedOpsMap res; + EXPECT_TRUE(model); + + auto id = config.at("DEVICE_ID").as(); + bool supportsRelu = std::stoi(id) < 10; + + for (auto&& node : model->get_ops()) { + std::string nodeType = node->get_type_name(); + if ((nodeType == "Relu" && supportsRelu) || (nodeType != "Relu" && !supportsRelu)) { + res.emplace(node->get_friendly_name(), deviceName + "." + id); + } } - } - return res; - })); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)) + return res; + })); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map& options) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap& options) { auto id = options.at("DEVICE_ID").as(); if (std::stoi(id) < 10) { return "mock_first_architecture"; @@ -2050,16 +1848,17 @@ TEST_P(CachingTest, LoadHetero_MultiArchs) { return; // skip the remote Context test for Hetero plugin } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(AtLeast(2)); // for .1 and for .51 - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(AtLeast(1)); - }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(AtLeast(2)); // for .1 and for .51 + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(AtLeast(1)); + }); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); // Ensure that only 1 blob (for Hetero) is created EXPECT_EQ(ov::test::utils::listFilesWithExt(m_cacheDir, "blob").size(), 1); @@ -2067,43 +1866,43 @@ TEST_P(CachingTest, LoadHetero_MultiArchs) { deviceToLoad = ov::test::utils::DEVICE_HETERO + std::string(":mock.2,mock.52"); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(AtLeast(2)); // for .2 and for .52 - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(AtLeast(2)); // for .2 and for .52 + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } deviceToLoad = ov::test::utils::DEVICE_HETERO + std::string(":mock.53,mock.3"); m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, 
LoadExeNetworkImpl(_, _)).Times(AtLeast(1)); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(AtLeast(1)); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(AtLeast(1)); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(AtLeast(1)); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); + comp_models.clear(); }); } } TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)) + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map& options) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap& options) { auto id = options.at("DEVICE_ID").as(); if (std::stoi(id) < 10) { return "mock_first_architecture"; @@ -2116,47 +1915,47 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { return; // skip the remote Context test for Hetero plugin } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ie.SetConfig({{"TARGET_FALLBACK", "mock.1"}}, ov::test::utils::DEVICE_HETERO); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + m_testFunction(core); }); } m_post_mock_net_callbacks.pop_back(); { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(1); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, 
compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(1); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{"TARGET_FALLBACK", "mock.1"}}, ov::test::utils::DEVICE_HETERO); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.1"}}); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); - testLoad([&](Core& ie) { - ie.SetConfig({{"TARGET_FALLBACK", "mock.51"}}, ov::test::utils::DEVICE_HETERO); - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::test::utils::DEVICE_HETERO, {{"TARGET_FALLBACK", "mock.51"}}); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); + comp_models.clear(); }); } } @@ -2166,91 +1965,97 @@ TEST_P(CachingTest, LoadHetero_MultiArchs_TargetFallback_FromCore) { // Single device TEST_P(CachingTest, LoadAUTO_OneDevice) { const auto TEST_COUNT = 2; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Auto plugin } - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); std::string cacheDir = m_cacheDir; MkDirGuard guard(cacheDir); for (int index = 0; index < TEST_COUNT; index++) { deviceToLoad = ov::test::utils::DEVICE_AUTO; deviceToLoad += ":mock.0"; - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(TEST_COUNT - index - 1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(index); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), cacheDir}}); - m_testFunction(ie); + EXPECT_CALL(*mockPlugin, 
compile_model(A&>(), _)) + .Times(TEST_COUNT - index - 1); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(index); + ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(cacheDir)); + m_testFunction(core); })); } std::cout << "Caching LoadAuto Test completed. Tried " << TEST_COUNT << " times" << std::endl; } // AUTO-DEVICE test -// load network with config +// load model with config TEST_P(CachingTest, LoadAUTOWithConfig) { const auto TEST_COUNT = 2; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Auto plugin } int index = 0; - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); std::string cacheDir = m_cacheDir; MkDirGuard guard(cacheDir); for (; index < TEST_COUNT; index++) { deviceToLoad = ov::test::utils::DEVICE_AUTO; deviceToLoad += ":mock.0"; - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(TEST_COUNT - index - 1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(index); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), cacheDir}}); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(TEST_COUNT - index - 1); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(index); + ASSERT_NO_THROW(testLoad([&](ov::Core& core) { + m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}}); })); } std::cout << "Caching LoadAuto Test completed. 
Tried " << index << " times" << std::endl; } // Single device not support import/export TEST_P(CachingTest, LoadAUTO_OneDeviceNoImportExport) { - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)) + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)) .Times(AnyNumber()) .WillRepeatedly(Return(false)); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Auto plugin } - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(!m_remoteContext ? 2 : 0); - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(m_type == TestLoadType::EModelName ? 2 : 0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - testLoad([&](Core& ie) { - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 2 : 0); + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 
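// Scenario note: the device reports no EXPORT_IMPORT capability, so caching is bypassed under
// AUTO; both loads compile from scratch and export_model is never called on either compiled model.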
2 : 0); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + testLoad([&](ov::Core& core) { + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); deviceToLoad = ov::test::utils::DEVICE_AUTO; deviceToLoad += ":mock.0"; - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); m_post_mock_net_callbacks.pop_back(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(0); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(0); }); - m_testFunction(ie); + m_testFunction(core); }); } // MULTI-DEVICE test @@ -2259,17 +2064,18 @@ TEST_P(CachingTest, LoadAUTO_OneDeviceNoImportExport) { TEST_P(CachingTest, LoadMulti_race) { const auto TEST_DURATION_MS = 2000; const auto TEST_DEVICE_MAX_COUNT = 10; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Multi plugin } int index = 0; auto start = high_resolution_clock::now(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); do { std::string cacheDir = m_cacheDir + std::to_string(index); @@ -2281,13 +2087,13 @@ TEST_P(CachingTest, LoadMulti_race) { deviceToLoad += ",mock." 
+ std::to_string(i); } - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(devCount - 1); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), cacheDir}}); - ASSERT_NO_THROW(m_testFunction(ie)); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(devCount - 1); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(cacheDir)); + ASSERT_NO_THROW(m_testFunction(core)); }); index++; } while (duration_cast(high_resolution_clock::now() - start).count() < TEST_DURATION_MS); @@ -2295,22 +2101,23 @@ TEST_P(CachingTest, LoadMulti_race) { } // MULTI-DEVICE test -// Test that it is safe to load multiple devices through loadNetwork +// Test that it is safe to load multiple devices through compile_model // In case of sporadic failures - increase 'TEST_DURATION_MS' 100x times for better reproducibility TEST_P(CachingTest, LoadMultiWithConfig_race) { const auto TEST_DURATION_MS = 2000; const auto TEST_DEVICE_MAX_COUNT = 10; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Multi plugin } int index = 0; auto start = high_resolution_clock::now(); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); do { std::string cacheDir = m_cacheDir + std::to_string(index); @@ -2322,12 +2129,12 @@ TEST_P(CachingTest, LoadMultiWithConfig_race) { deviceToLoad += ",mock." 
+ std::to_string(i); } - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(devCount - 1); - testLoad([&](Core& ie) { - ASSERT_NO_THROW(m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), cacheDir}})); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(devCount - 1); + testLoad([&](ov::Core& core) { + ASSERT_NO_THROW(m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}})); }); index++; } while (duration_cast(high_resolution_clock::now() - start).count() < TEST_DURATION_MS); @@ -2339,12 +2146,13 @@ TEST_P(CachingTest, LoadMultiWithConfig_race) { // In case of sporadic failures - increase 'TEST_DEVICE_MAX_COUNT' 100x times for better reproducibility TEST_P(CachingTest, LoadMulti_Archs) { const auto TEST_DEVICE_MAX_COUNT = 30; // Shall be >= 2 - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)) + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)) .Times(AnyNumber()) - .WillRepeatedly(Invoke([&](const std::string&, const std::map& options) { + .WillRepeatedly(Invoke([&](const std::string&, const ov::AnyMap& options) { auto id = options.at("DEVICE_ID").as(); auto i = std::stoi(id) / 2; return "mock_architecture" + std::to_string(i); @@ -2360,30 +2168,27 @@ TEST_P(CachingTest, LoadMulti_Archs) { } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(TEST_DEVICE_MAX_COUNT / 2); - // Load network from file shall not be called for plugins with caching supported - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()).Times(0); - - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)) + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(TEST_DEVICE_MAX_COUNT / 2); + // Load model from file shall not be called for plugins with caching supported + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(0); + + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)) .Times(TEST_DEVICE_MAX_COUNT / 2) - .WillRepeatedly(Invoke([&](std::istream& s, const std::map&) { + .WillRepeatedly(Invoke([&](std::istream& s, const ov::AnyMap&) { std::string name; s >> name; std::lock_guard lock(mock_creation_mutex); - return createMockIExecutableNet({}, - m_inputs_map[name], - m_outputs_map[name], - m_inputs[name], - m_outputs[name]); + return create_mock_compiled_model(m_models[name], mockPlugin); })); - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); // each net 
will be exported once + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); // each net will be exported once }); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - m_testFunction(ie); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); }); } } @@ -2393,27 +2198,24 @@ TEST_P(CachingTest, LoadMulti_Archs) { // In case of sporadic failures - increase 'TEST_DEVICE_MAX_COUNT' 100x times for better reproducibility TEST_P(CachingTest, LoadMulti_NoCachingOnDevice) { const auto TEST_DEVICE_MAX_COUNT = 100; // Looks enough to catch potential race conditions - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)) + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capability::EXPORT_IMPORT, _)) .Times(AnyNumber()) - .WillRepeatedly(Return(Parameter{false})); - EXPECT_CALL(*mockPlugin, GetMetric(ov::device::capabilities.name(), _)) + .WillRepeatedly(Return(false)); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) .Times(AnyNumber()) .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); - - DataPtr inData = std::make_shared("Param_1", Precision::FP32); - InputInfo inpInfo; - inpInfo.setInputData(inData); - InputInfo::CPtr cptr = std::make_shared(inpInfo); - ConstInputsDataMap inputMap{{"Param_1", cptr}}; - CDataPtr dataptr = std::make_shared("Reshape_2", Precision::FP32); - ConstOutputsDataMap outputMap{{"Reshape_2", dataptr}}; - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, GetInputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(inputMap)); - EXPECT_CALL(net, GetOutputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(outputMap)); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + + std::vector> ins; + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 2, 2}); + ins.emplace_back(param->output(0)); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& model) { + EXPECT_CALL(model, inputs()).Times(AnyNumber()).WillRepeatedly(ReturnRefOfCopy(ins)); + EXPECT_CALL(model, outputs()).Times(AnyNumber()).WillRepeatedly(ReturnRefOfCopy(ins)); }); if (m_remoteContext) { return; // skip the remote Context test for Multi plugin @@ -2426,24 +2228,25 @@ TEST_P(CachingTest, LoadMulti_NoCachingOnDevice) { } { - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(TEST_DEVICE_MAX_COUNT); - // Load network from file shall not be called by Multi plugin for devices with caching supported - EXPECT_CALL(*mockPlugin, OnLoadNetworkFromFile()) - .Times(m_type == TestLoadType::ECNN ? 
0 : TEST_DEVICE_MAX_COUNT); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(0); - for (auto& net : networks) { - EXPECT_CALL(*net, Export(_)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(TEST_DEVICE_MAX_COUNT); + // Load model from file shall not be called by Multi plugin for devices with caching supported + EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()) + .Times(m_type == TestLoadType::EModel ? 0 : TEST_DEVICE_MAX_COUNT); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(0); + for (auto& net : comp_models) { + EXPECT_CALL(*net, export_model(_)).Times(0); } - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ExecutableNetwork exeNet; - exeNet = m_testFunction(ie); - // Verify that inputs and outputs are set for Multi Executable Network - ASSERT_EQ(exeNet.GetInputsInfo().size(), inputMap.size()); - ASSERT_EQ(exeNet.GetOutputsInfo().size(), outputMap.size()); - networks.clear(); + testLoad([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + ov::CompiledModel model; + model = m_testFunction(core); + // Verify that inputs and outputs are set for Multi Compiled Model + ASSERT_EQ(model.inputs().size(), ins.size()); + ASSERT_EQ(model.outputs().size(), ins.size()); + comp_models.clear(); }); } } @@ -2451,30 +2254,37 @@ TEST_P(CachingTest, LoadMulti_NoCachingOnDevice) { #if defined(ENABLE_AUTO_BATCH) // BATCH-DEVICE test -// load network with config +// load model with config TEST_P(CachingTest, LoadBATCHWithConfig) { const auto TEST_COUNT = 2; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::hint::performance_mode.name(), _)) + .Times(AnyNumber()) + .WillRepeatedly(Return([] { + return ov::hint::PerformanceMode::THROUGHPUT; + })); if (m_remoteContext) { return; // skip the remote Context test for Auto plugin } int index = 0; - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); std::string cacheDir = m_cacheDir; MkDirGuard guard(cacheDir); for (; index < TEST_COUNT; index++) { deviceToLoad = ov::test::utils::DEVICE_BATCH; deviceToLoad += ":mock.0"; - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(TEST_COUNT - index - 1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(index); - ASSERT_NO_THROW(testLoad([&](Core& ie) { - m_testFunctionWithCfg(ie, {{CONFIG_KEY(CACHE_DIR), cacheDir}}); - })); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(TEST_COUNT - index - 1); + EXPECT_CALL(*mockPlugin, import_model(_, 
_)).Times(index); + testLoad([&](ov::Core& core) { + m_testFunctionWithCfg(core, {{ov::cache_dir.name(), cacheDir}}); + }); } std::cout << "Caching LoadAuto Test completed. Tried " << index << " times" << std::endl; } @@ -2484,31 +2294,31 @@ TEST_P(CachingTest, LoadBATCHWithConfig) { TEST_P(CachingTest, Load_threads) { const auto TEST_DURATION_MS = 2000; const auto THREADS_COUNT = 4; - EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); - EXPECT_CALL(*mockPlugin, GetMetric(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, query_model(_, _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); if (m_remoteContext) { return; // skip the remote Context test for Multi plugin } auto start = high_resolution_clock::now(); int index = 0; - m_post_mock_net_callbacks.emplace_back([&](MockExecutableNetwork& net) { - EXPECT_CALL(net, Export(_)).Times(1); + m_post_mock_net_callbacks.emplace_back([&](MockICompiledModelImpl& net) { + EXPECT_CALL(net, export_model(_)).Times(1); }); do { std::string cacheDir = m_cacheDir + std::to_string(index); MkDirGuard guard(cacheDir); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, LoadExeNetworkImpl(_, _)).Times(1); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _, _)).Times(0); - EXPECT_CALL(*mockPlugin, ImportNetwork(_, _)).Times(THREADS_COUNT - 1); - testLoad([&](Core& ie) { - ie.SetConfig({{CONFIG_KEY(CACHE_DIR), cacheDir}}); + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)).Times(1); + EXPECT_CALL(*mockPlugin, import_model(_, _, _)).Times(0); + EXPECT_CALL(*mockPlugin, import_model(_, _)).Times(THREADS_COUNT - 1); + testLoad([&](ov::Core& core) { + core.set_property({{ov::cache_dir.name(), cacheDir}}); std::vector threads; for (int i = 0; i < THREADS_COUNT; i++) { threads.emplace_back(([&]() { - m_testFunction(ie); + m_testFunction(core); })); } for (int i = 0; i < THREADS_COUNT; i++) { @@ -2530,4 +2340,4 @@ INSTANTIATE_TEST_SUITE_P(CachingTest, CachingTest, ::testing::Combine(::testing::ValuesIn(loadVariants), ::testing::ValuesIn(cacheFolders)), getTestCaseName); -#endif // defined(ENABLE_OV_IR_FRONTEND) \ No newline at end of file +#endif // defined(ENABLE_OV_IR_FRONTEND) diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp index bac255154d2857..6ca6a9e5caab65 100644 --- a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp +++ b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp @@ -22,17 +22,9 @@ class MockInternalPlugin : public ov::IPlugin { ov::IPlugin* m_plugin = nullptr; - std::shared_ptr m_converted_plugin; - InferenceEngine::IInferencePlugin* m_old_plugin = nullptr; ov::AnyMap config; public: - explicit MockInternalPlugin(InferenceEngine::IInferencePlugin* target) : m_old_plugin(target) { - std::shared_ptr shared_target(target, - [](InferenceEngine::IInferencePlugin*) {}); - m_converted_plugin = InferenceEngine::convert_plugin(shared_target); - m_plugin = 
m_converted_plugin.get(); - } explicit MockInternalPlugin(ov::IPlugin* target) : m_plugin(target) {} explicit MockInternalPlugin() = default; @@ -113,15 +105,6 @@ class MockInternalPlugin : public ov::IPlugin { m_plugin->set_device_name(dev_name); } } - if (m_old_plugin) { - if (!m_old_plugin->GetCore() && core) { - auto old_core = std::static_pointer_cast(core); - m_old_plugin->SetCore(old_core); - } - if (m_old_plugin->GetName().empty()) { - m_old_plugin->SetName(dev_name); - } - } } }; @@ -206,11 +189,6 @@ OPENVINO_PLUGIN_API void CreatePluginEngine(std::shared_ptr& plugin plugin = std::make_shared(internal_plugin); } -OPENVINO_PLUGIN_API void InjectProxyEngine(InferenceEngine::IInferencePlugin* target) { - std::lock_guard lock(targets_mutex); - targets.push(std::make_shared(target)); -} - OPENVINO_PLUGIN_API void InjectPlugin(ov::IPlugin* target) { std::lock_guard lock(targets_mutex); targets.push(std::make_shared(target)); From 75b6a24787bfa412cdeaa36cf5c0111d1aa9aa95 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 5 Sep 2023 00:42:42 +0400 Subject: [PATCH 11/16] Remove ICore legacy mock object (#19573) --- .../tests/unit/include/auto_unit_test.hpp | 6 +- .../tests/unit/async_infer_request_test.cpp | 9 +- ...ompile_model_create_infer_request_test.cpp | 8 +- .../unit/compile_model_get_property_test.cpp | 6 +- .../compile_model_get_runtime_model_test.cpp | 8 +- .../unit/compile_model_set_property_test.cpp | 6 +- .../tests/unit/parse_meta_device_test.cpp | 6 +- .../tests/unit/plugin_compile_model_test.cpp | 8 +- .../tests/unit/plugin_get_property_test.cpp | 3 +- .../tests/unit/plugin_query_model_test.cpp | 8 +- .../tests/unit/sync_infer_request_test.cpp | 9 +- src/tests/test_utils/unit_test_utils/mock.cpp | 1 - .../cpp_interfaces/interface/mock_icore.hpp | 104 ------------------ 13 files changed, 39 insertions(+), 143 deletions(-) delete mode 100644 src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index 3c6d5236410078..1142c7d871cad0 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -11,7 +11,7 @@ #include "gmock_plugin.hpp" #include "mock_common.hpp" #include -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::MatcherCast; using ::testing::AllOf; @@ -84,7 +84,7 @@ class BaseTest { // for auto unit tests which can covered by mock core, or need to test with gmock icore class AutoTest : public BaseTest { public: - std::shared_ptr> core; + std::shared_ptr> core; AutoTest(); ~AutoTest(); }; @@ -112,4 +112,4 @@ class AutoTestWithRealCore : public BaseTest { }; } // namespace tests } // namespace mock_auto_plugin -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index 2ed487a7a8b807..48f7a92b98d0cd 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -11,7 +11,8 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "transformations/utils/utils.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include 
"unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" + using ::testing::_; using ::testing::AnyNumber; using ::testing::AtLeast; @@ -33,7 +34,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam m_model; std::shared_ptr m_batched_model; - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_auto_batch_plugin; std::shared_ptr> m_hardware_plugin; @@ -117,7 +118,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_auto_batch_plugin = std::shared_ptr>(new NiceMock()); @@ -321,4 +322,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, ::testing::Combine(::testing::ValuesIn(batch_size_param), ::testing::ValuesIn(element_type_param), ::testing::ValuesIn(infer_interval_timeout_param)), - AutoBatchAsyncInferRequestTest::getTestCaseName); \ No newline at end of file + AutoBatchAsyncInferRequestTest::getTestCaseName); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 8ffc323d8e1ba2..3f9b938d4c802d 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -32,7 +32,7 @@ using CreateInferRequestTestParams = std::tuple { public: std::shared_ptr m_model; - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_auto_batch_plugin; std::shared_ptr> m_i_compile_model_without_batch; @@ -84,7 +84,7 @@ class CompileModelCreateInferRequestTest : public ::testing::TestWithParamGetParam(); m_model = ngraph::builder::subgraph::makeMultiSingleConv(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_auto_batch_plugin = std::shared_ptr>(new NiceMock()); @@ -145,4 +145,4 @@ const std::vector batch_size{1, 8, 16, 32, 128, 256}; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, CompileModelCreateInferRequestTest, ::testing::Combine(::testing::ValuesIn(batch_size), ::testing::ValuesIn(requests_num)), - CompileModelCreateInferRequestTest::getTestCaseName); \ No newline at end of file + CompileModelCreateInferRequestTest::getTestCaseName); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index c9c56ebfb72360..67c0016469916a 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -8,7 +8,7 @@ #include "mock_common.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -32,7 +32,7 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParam> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::shared_ptr m_model; @@ -68,7 +68,7 @@ class 
CompileModelGetPropertyTest : public ::testing::TestWithParamGetParam(); m_model = ngraph::builder::subgraph::makeMultiSingleConv(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index f5338dd54d5815..5d921ecd499457 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -8,7 +8,7 @@ #include "mock_common.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -27,7 +27,7 @@ using namespace ov::mock_autobatch_plugin; class CompileModelGetRuntimeModelTest : public ::testing::Test { public: - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::shared_ptr m_model; @@ -49,7 +49,7 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { void SetUp() override { m_model = ngraph::builder::subgraph::makeMultiSingleConv(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); @@ -96,4 +96,4 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { TEST_F(CompileModelGetRuntimeModelTest, CompileModelGetRuntimeModelTestCase) { ASSERT_NO_THROW(m_auto_batch_compile_model->get_runtime_model()); -} \ No newline at end of file +} diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index b15ba2b9a248fc..c3d8057f1831bf 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -8,7 +8,7 @@ #include "mock_common.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -32,7 +32,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParam> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::shared_ptr m_model; @@ -71,7 +71,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParamGetParam(); m_model = ngraph::builder::subgraph::makeMultiSingleConv(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); diff --git a/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp index 8788b74b060760..e6e99f201893aa 100644 --- a/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp +++ b/src/plugins/auto_batch/tests/unit/parse_meta_device_test.cpp @@ -6,7 +6,7 @@ #include #include "mock_common.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; 
@@ -37,7 +37,7 @@ const std::vector gpu_supported_properties = { class ParseMetaDeviceTest : public ::testing::TestWithParam { public: - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::string m_batch_cfg; @@ -68,7 +68,7 @@ class ParseMetaDeviceTest : public ::testing::TestWithParam } void SetUp() override { - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index f6596e4b1629c9..d69335582528a1 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -8,7 +8,7 @@ #include "mock_common.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -35,7 +35,7 @@ class PluginCompileModelTest : public ::testing::TestWithParam> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::shared_ptr m_model; ov::SoPtr m_remote_context; @@ -73,7 +73,7 @@ class PluginCompileModelTest : public ::testing::TestWithParamGetParam(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); @@ -229,4 +229,4 @@ const std::vector plugin_compile_model_param_test = INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, PluginCompileModelTest, ::testing::ValuesIn(plugin_compile_model_param_test), - PluginCompileModelTest::getTestCaseName); \ No newline at end of file + PluginCompileModelTest::getTestCaseName); diff --git a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp index b6a8cdaea6a061..5d259789333310 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_get_property_test.cpp @@ -6,7 +6,6 @@ #include #include "mock_common.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -99,4 +98,4 @@ const std::vector get_property_params_test = { INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, GetPropertyTest, ::testing::ValuesIn(get_property_params_test), - GetPropertyTest::getTestCaseName); \ No newline at end of file + GetPropertyTest::getTestCaseName); diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index 713df840ea30b2..e04a6e897cfe33 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -7,7 +7,7 @@ #include "mock_common.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -29,7 +29,7 @@ class QueryModelTest : public ::testing::TestWithParam { public: ov::AnyMap m_properties; bool m_throw_exception; - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_plugin; std::shared_ptr 
m_model; ov::SupportedOpsMap m_supported_ops_map; @@ -61,7 +61,7 @@ class QueryModelTest : public ::testing::TestWithParam { void SetUp() override { std::tie(m_properties, m_throw_exception) = this->GetParam(); m_model = ngraph::builder::subgraph::makeMultiSingleConv(); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); m_plugin->set_core(m_core); @@ -88,4 +88,4 @@ const std::vector query_model_params_test = { INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, QueryModelTest, ::testing::ValuesIn(query_model_params_test), - QueryModelTest::getTestCaseName); \ No newline at end of file + QueryModelTest::getTestCaseName); diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index 8d6e52f017fb48..95713fea538a8f 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -11,7 +11,8 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "transformations/utils/utils.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" + using ::testing::_; using ::testing::AnyNumber; using ::testing::AtLeast; @@ -31,7 +32,7 @@ using AutoBatchRequestTestParams = std::tuple { public: std::shared_ptr m_model; - std::shared_ptr> m_core; + std::shared_ptr> m_core; std::shared_ptr> m_auto_batch_plugin; std::shared_ptr> m_i_compile_model_without_batch; @@ -96,7 +97,7 @@ class AutoBatchRequestTest : public ::testing::TestWithParamGetParam(); std::vector inputShape = {1, 3, 24, 24}; m_model = ngraph::builder::subgraph::makeMultiSingleConv(inputShape, m_element_type); - m_core = std::shared_ptr>(new NiceMock()); + m_core = std::shared_ptr>(new NiceMock()); m_auto_batch_plugin = std::shared_ptr>(new NiceMock()); @@ -254,4 +255,4 @@ const std::vector batch_size{1, 8, 16, 32, 64, 128}; INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, AutoBatchRequestTest, ::testing::Combine(::testing::ValuesIn(batch_size), ::testing::ValuesIn(element_type)), - AutoBatchRequestTest::getTestCaseName); \ No newline at end of file + AutoBatchRequestTest::getTestCaseName); diff --git a/src/tests/test_utils/unit_test_utils/mock.cpp b/src/tests/test_utils/unit_test_utils/mock.cpp index 81a946e499d312..bd117b9e751384 100644 --- a/src/tests/test_utils/unit_test_utils/mock.cpp +++ b/src/tests/test_utils/unit_test_utils/mock.cpp @@ -15,7 +15,6 @@ #include "unit_test_utils/mocks/cpp_interfaces/impl/mock_executable_thread_safe_default.hpp" #include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp" diff --git a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp deleted file mode 100644 index e315c212d81af1..00000000000000 --- a/src/tests/test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp +++ /dev/null @@ -1,104 +0,0 @@ -// 
Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "ie_icore.hpp" -#include "openvino/runtime/icompiled_model.hpp" - -class MockICore : public InferenceEngine::ICore { -public: - MOCK_CONST_METHOD3(ReadNetwork, - InferenceEngine::CNNNetwork(const std::string&, const InferenceEngine::Blob::CPtr&, bool)); - MOCK_CONST_METHOD2(ReadNetwork, InferenceEngine::CNNNetwork(const std::string&, const std::string&)); - - MOCK_METHOD3(LoadNetwork, - InferenceEngine::SoExecutableNetworkInternal(const InferenceEngine::CNNNetwork&, - const std::string&, - const std::map&)); - MOCK_METHOD3(LoadNetwork, - InferenceEngine::SoExecutableNetworkInternal(const InferenceEngine::CNNNetwork&, - const std::shared_ptr&, - const std::map&)); - MOCK_METHOD4( - LoadNetwork, - InferenceEngine::SoExecutableNetworkInternal(const std::string&, - const std::string&, - const std::map&, - const std::function&)); - MOCK_METHOD5( - LoadNetwork, - InferenceEngine::SoExecutableNetworkInternal(const std::string&, - const InferenceEngine::Blob::CPtr&, - const std::string&, - const std::map&, - const std::function&)); - - MOCK_METHOD3(ImportNetwork, - InferenceEngine::SoExecutableNetworkInternal(std::istream&, - const std::string&, - const std::map&)); - MOCK_METHOD3(ImportNetwork, - InferenceEngine::SoExecutableNetworkInternal(std::istream&, - const std::shared_ptr&, - const std::map&)); - - MOCK_METHOD2(CreateContext, - InferenceEngine::RemoteContext::Ptr(const std::string& deviceName, - const InferenceEngine::ParamMap& params)); - - MOCK_CONST_METHOD3(QueryNetwork, - InferenceEngine::QueryNetworkResult(const InferenceEngine::CNNNetwork&, - const std::string&, - const std::map&)); - - MOCK_CONST_METHOD3(GetMetric, ov::Any(const std::string&, const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD2(GetConfig, ov::Any(const std::string&, const std::string&)); - MOCK_CONST_METHOD3(get_property, ov::Any(const std::string&, const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD2(get_property, ov::Any(const std::string&, const std::string&)); - MOCK_CONST_METHOD0(GetAvailableDevices, std::vector()); - MOCK_CONST_METHOD1(DeviceSupportsModelCaching, bool(const std::string&)); // NOLINT not a cast to bool - MOCK_METHOD2(GetSupportedConfig, - std::map(const std::string&, const std::map&)); - MOCK_CONST_METHOD2(get_supported_property, ov::AnyMap(const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD0(isNewAPI, bool()); - MOCK_METHOD1(GetDefaultContext, InferenceEngine::RemoteContext::Ptr(const std::string&)); - - MOCK_CONST_METHOD0(is_new_api, bool()); - MOCK_CONST_METHOD2(create_context, - ov::SoPtr(const std::string& deviceName, const ov::AnyMap& params)); - MOCK_CONST_METHOD0(get_available_devices, std::vector()); - MOCK_CONST_METHOD3(query_model, - ov::SupportedOpsMap(const std::shared_ptr&, - const std::string&, - const ov::AnyMap&)); - MOCK_CONST_METHOD3(import_model, - ov::SoPtr(std::istream&, const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD3(compile_model, - ov::SoPtr(const std::shared_ptr&, - const std::string&, - const ov::AnyMap&)); - MOCK_CONST_METHOD3(compile_model, - ov::SoPtr(const std::shared_ptr&, - const ov::SoPtr&, - const ov::AnyMap&)); - MOCK_CONST_METHOD3(compile_model, - ov::SoPtr(const std::string&, const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD4( - compile_model, - ov::SoPtr(const std::string&, const ov::Tensor&, const std::string&, const ov::AnyMap&)); - MOCK_CONST_METHOD3(read_model, 
std::shared_ptr(const std::string&, const ov::Tensor&, bool)); - MOCK_CONST_METHOD2(read_model, std::shared_ptr(const std::string&, const std::string&)); - MOCK_CONST_METHOD1(get_default_context, ov::SoPtr(const std::string&)); - MOCK_CONST_METHOD3(import_model, - ov::SoPtr(std::istream&, - const ov::SoPtr&, - const ov::AnyMap&)); - MOCK_CONST_METHOD1(device_supports_model_caching, bool(const std::string&)); - MOCK_METHOD2(set_property, void(const std::string& device_name, const ov::AnyMap& properties)); - - ~MockICore() = default; -}; From c3b0c531f7134cc97859295700f08913e4b8e80e Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 5 Sep 2023 09:33:35 +0400 Subject: [PATCH 12/16] Added tflite to 'predefined_frontends' list (#19599) --- src/frontends/common/src/manager.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp index 2fa59398b11492..fa917dadb1caad 100644 --- a/src/frontends/common/src/manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -49,6 +49,7 @@ class FrontEndManager::Impl { {"ir", "ir"}, {"onnx", "onnx"}, {"tf", "tensorflow"}, + {"tflite", "tensorflow_lite"}, {"paddle", "paddle"}, {"pytorch", "pytorch"}, }; From 4eadef9e615cb5078da3ddf53fe39c891269fe93 Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Tue, 5 Sep 2023 09:41:01 +0400 Subject: [PATCH 13/16] [TF Hub][TF FE] Make TF Hub validation more robust and add convenient xfail test marking (#19596) * [TF Hub][TF FE] Use multiprocessing based tests for TF Hub validation Signed-off-by: Kazantsev, Roman * Fix import and initialization * [TF Hub][TF FE] Make TF Hub validation more robust and add convenient marking for failing cases Signed-off-by: Kazantsev, Roman --------- Signed-off-by: Kazantsev, Roman --- .../multiprocessing_utils.py | 93 +++++++++++++++++++ .../models_hub_common/test_convert_model.py | 8 +- .../models_hub_common/utils.py | 13 ++- .../tf_hub_tests/precommit_models | 3 +- .../tf_hub_tests/test_tf_hub_convert_model.py | 14 ++- 5 files changed, 123 insertions(+), 8 deletions(-) create mode 100644 tests/model_hub_tests/models_hub_common/multiprocessing_utils.py diff --git a/tests/model_hub_tests/models_hub_common/multiprocessing_utils.py b/tests/model_hub_tests/models_hub_common/multiprocessing_utils.py new file mode 100644 index 00000000000000..317dfdb3fe2512 --- /dev/null +++ b/tests/model_hub_tests/models_hub_common/multiprocessing_utils.py @@ -0,0 +1,93 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import os +import platform +import signal +import sys +import traceback +from multiprocessing import Process, Queue, TimeoutError, ProcessError +from queue import Empty as QueueEmpty +from typing import Callable, Union + +def _mp_wrapped_func(func: Callable, func_args: list, queue: Queue, logger_queue: Queue): + """ + Wraps callable object with exception handling. Current wrapper is a target for + `multiprocessing_run` function + :param func: see `multiprocessing_run` + :param func_args: see `multiprocessing_run` + :param queue: multiprocessing.Queue(). Used for getting callable object return values + :param logger_queue: multiprocessing.Queue(). 
Used for getting logs from child process in parent process + :return: + """ + + error_message = "" + res = None + try: + res = func(*func_args) + except: + ex_type, ex_value, tb = sys.exc_info() + error_message = "{tb}\n{ex_type}: {ex_value}".format(tb=''.join(traceback.format_tb(tb)), + ex_type=ex_type.__name__, ex_value=ex_value) + queue.put((error_message, res)) + + +def multiprocessing_run(func: Callable, func_args: list, func_log_name: str, timeout: Union[int, None] = None): + """ + Wraps callable object to a separate process using multiprocessing module + :param func: callable object + :param func_args: list of arguments for callable + :param func_log_name: name of callable used for logging + :param timeout: positive int to limit execution time + :return: return value (or values) from callable object + """ + queue = Queue() + logger_queue = Queue(-1) + process = Process(target=_mp_wrapped_func, args=(func, func_args, queue, logger_queue)) + process.start() + try: + error_message, *ret_args = queue.get(timeout=timeout) + except QueueEmpty: + raise TimeoutError("{func} running timed out!".format(func=func_log_name)) + finally: + queue.close() + + # Extract logs from Queue and pass to root logger + while not logger_queue.empty(): + rec = logger_queue.get() + log.getLogger().handle(rec) + logger_queue.close() + + if process.is_alive(): + process.terminate() + process.join() + else: + exit_signal = multiprocessing_exitcode_to_signal(process.exitcode) + if exit_signal: + raise ProcessError( + "{func} was killed with a signal {signal}".format(func=func_log_name, signal=exit_signal)) + + if error_message: + raise ProcessError("\n{func} running failed: \n{msg}".format(func=func_log_name, msg=error_message)) + + ret_args = ret_args[0] if len(ret_args) == 1 else ret_args # unwrap from list if only 1 item is returned + return ret_args + + +def multiprocessing_exitcode_to_signal(exitcode): + """ + Map multiprocessing exitcode to signals from "signal" module + :param exitcode: multiprocessing exitcode + :return: signal from "signal" if exitcode mapped on signal or None + """ + # Multiprocessing return negative values of signal of the process, but on Win they are positive. + # Bring the value to the positive format. 
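+    # e.g. a child terminated by SIGKILL has multiprocessing exitcode -9 on POSIX; after this sign flip it becomes 9 and maps to "SIGKILL" below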
+ exit_code = exitcode if os.name == "nt" else -exitcode + if exit_code > 0: + code_map = {int(getattr(signal, sig)): str(getattr(signal, sig)) + for sig in dir(signal) if sig.startswith("SIG")} + exit_signal = code_map[exit_code] if exit_code in code_map else exit_code + else: + exit_signal = None + return exit_signal diff --git a/tests/model_hub_tests/models_hub_common/test_convert_model.py b/tests/model_hub_tests/models_hub_common/test_convert_model.py index 767dfa2b5d4aee..70b5418136ef9a 100644 --- a/tests/model_hub_tests/models_hub_common/test_convert_model.py +++ b/tests/model_hub_tests/models_hub_common/test_convert_model.py @@ -3,11 +3,14 @@ import gc import numpy as np +from models_hub_common.multiprocessing_utils import multiprocessing_run from openvino.runtime import Core from openvino.tools.mo import convert_model class TestConvertModel: + infer_timeout = 600 + def load_model(self, model_name, model_link): raise "load_model is not implemented" @@ -74,7 +77,7 @@ def teardown_method(self): # deallocate memory after each test case gc.collect() - def run(self, model_name, model_link, ie_device): + def _run(self, model_name, model_link, ie_device): print("Load the model {} (url: {})".format(model_name, model_link)) fw_model = self.load_model(model_name, model_link) print("Retrieve inputs info") @@ -89,3 +92,6 @@ def run(self, model_name, model_link, ie_device): ov_outputs = self.infer_ov_model(ov_model, inputs, ie_device) print("Compare TensorFlow and OpenVINO results") self.compare_results(fw_outputs, ov_outputs) + + def run(self, model_name, model_link, ie_device): + multiprocessing_run(self._run, [model_name, model_link, ie_device], model_name, self.infer_timeout) diff --git a/tests/model_hub_tests/models_hub_common/utils.py b/tests/model_hub_tests/models_hub_common/utils.py index b2b31c72707bac..3c4d16402de4d2 100644 --- a/tests/model_hub_tests/models_hub_common/utils.py +++ b/tests/model_hub_tests/models_hub_common/utils.py @@ -10,8 +10,17 @@ def get_models_list(file_name: str): models = [] with open(file_name) as f: for model_info in f: - model_name, model_link = model_info.split(',') - models.append((model_name, model_link)) + mark = None + reason = None + assert len(model_info.split(',')) == 2 or len(model_info.split(',')) == 4, \ + "Incorrect model info `{}`. 
It must contain either 2 or 4 fields.".format(model_info) + if len(model_info.split(',')) == 2: + model_name, model_link = model_info.split(',') + elif len(model_info.split(',')) == 4: + model_name, model_link, mark, reason = model_info.split(',') + assert mark == "skip", "Incorrect failure mark for model info {}".format(model_info) + models.append((model_name, model_link, mark, reason)) + return models diff --git a/tests/model_hub_tests/tf_hub_tests/precommit_models b/tests/model_hub_tests/tf_hub_tests/precommit_models index 600039ee27d02d..63fbdbb63bb418 100644 --- a/tests/model_hub_tests/tf_hub_tests/precommit_models +++ b/tests/model_hub_tests/tf_hub_tests/precommit_models @@ -5,4 +5,5 @@ movenet/singlepose/thunder,https://tfhub.dev/google/movenet/singlepose/thunder/4 imagenet/mobilenet_v2_100_224/feature_vector,https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/5?tf-hub-format=compressed efficientnet/lite0/classification,https://tfhub.dev/tensorflow/efficientnet/lite0/classification/2?tf-hub-format=compressed movenet/multipose/lightning,https://tfhub.dev/google/movenet/multipose/lightning/1?tf-hub-format=compressed -imagenet/efficientnet_v2_imagenet1k_b0/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2?tf-hub-format=compressed \ No newline at end of file +imagenet/efficientnet_v2_imagenet1k_b0/feature_vector,https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2?tf-hub-format=compressed +imagenet/mobilenet_v1_100_224/classification,https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/classification/5?tf-hub-format=compressed,skip,119718 - Accuracy issue \ No newline at end of file diff --git a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py index 6bb0ef7e9b837c..6ae10afa2c5c6f 100644 --- a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py +++ b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_convert_model.py @@ -79,14 +79,20 @@ def infer_fw_model(self, model_obj, inputs): fw_outputs[internal_name] = out_value return fw_outputs - @pytest.mark.parametrize("model_name,model_link", + @pytest.mark.parametrize("model_name,model_link,mark,reason", get_models_list(os.path.join(os.path.dirname(__file__), "precommit_models"))) @pytest.mark.precommit - def test_convert_model_precommit(self, model_name, model_link, ie_device): + def test_convert_model_precommit(self, model_name, model_link, mark, reason, ie_device): + assert mark is None or mark == 'skip', "Incorrect test case: {}, {}".format(model_name, model_link) + if mark == 'skip': + pytest.skip(reason) self.run(model_name, model_link, ie_device) - @pytest.mark.parametrize("model_name,model_link", + @pytest.mark.parametrize("model_name,model_link,mark,reason", get_models_list(os.path.join(os.path.dirname(__file__), "nightly_models"))) @pytest.mark.nightly - def test_convert_model_all_models(self, model_name, model_link, ie_device): + def test_convert_model_all_models(self, model_name, model_link, mark, reason, ie_device): + assert mark is None or mark == 'skip', "Incorrect test case: {}, {}".format(model_name, model_link) + if mark == 'skip': + pytest.skip(reason) self.run(model_name, model_link, ie_device) From 1b5f4287524b2c33917baa9d5bebd2b3694b24d7 Mon Sep 17 00:00:00 2001 From: Xiuchuan Zhai Date: Tue, 5 Sep 2023 14:36:30 +0800 Subject: [PATCH 14/16] eliminate broadcast node in masked_fill (#19595) --- src/frontends/pytorch/src/op/masked_fill.cpp | 6 
++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/frontends/pytorch/src/op/masked_fill.cpp b/src/frontends/pytorch/src/op/masked_fill.cpp index 5ed090e0b619b7..2093e05679044d 100644 --- a/src/frontends/pytorch/src/op/masked_fill.cpp +++ b/src/frontends/pytorch/src/op/masked_fill.cpp @@ -23,14 +23,12 @@ OutputVector translate_masked_fill(const NodeContext& context) { auto data = context.get_input(0); auto mask = context.get_input(1); auto value = context.get_input(2); - auto data_shape = context.mark_node(std::make_shared(data, element::i32)); value = context.mark_node(std::make_shared(value, data)); - auto broadcasted_value = context.mark_node(std::make_shared(value, data_shape)); auto bool_mask = context.mark_node(std::make_shared(mask, element::boolean)); - return {context.mark_node(std::make_shared(bool_mask, broadcasted_value, data))}; + return {context.mark_node(std::make_shared(bool_mask, value, data))}; }; } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov From 53414832eb7a22b5aa4b52b11cdd16eef0d7096c Mon Sep 17 00:00:00 2001 From: Alexander Suvorov Date: Tue, 5 Sep 2023 09:51:04 +0200 Subject: [PATCH 15/16] add 2023.0.2 selector tool (#19598) --- .../selector-tool/assets/selector-4429452f.js | 54 ------------------- .../selector-tool/assets/selector-8f26adbc.js | 54 +++++++++++++++++++ ...tor-136759b.html => selector-d0636ef.html} | 4 +- .../installing-openvino-overview.md | 2 +- 4 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 docs/_static/selector-tool/assets/selector-4429452f.js create mode 100644 docs/_static/selector-tool/assets/selector-8f26adbc.js rename docs/_static/selector-tool/{selector-136759b.html => selector-d0636ef.html} (79%) diff --git a/docs/_static/selector-tool/assets/selector-4429452f.js b/docs/_static/selector-tool/assets/selector-4429452f.js deleted file mode 100644 index 53e227952f81b2..00000000000000 --- a/docs/_static/selector-tool/assets/selector-4429452f.js +++ /dev/null @@ -1,54 +0,0 @@ -var hf=Object.defineProperty;var gf=(e,t,n)=>t in e?hf(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var Ae=(e,t,n)=>(gf(e,typeof t!="symbol"?t+"":t,n),n);function vf(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Kr={},yf={get exports(){return Kr},set exports(e){Kr=e}},Di={},U={},_f={get exports(){return U},set exports(e){U=e}},j={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var uo=Symbol.for("react.element"),kf=Symbol.for("react.portal"),wf=Symbol.for("react.fragment"),Sf=Symbol.for("react.strict_mode"),Of=Symbol.for("react.profiler"),Ef=Symbol.for("react.provider"),Nf=Symbol.for("react.context"),Pf=Symbol.for("react.forward_ref"),Rf=Symbol.for("react.suspense"),xf=Symbol.for("react.memo"),Cf=Symbol.for("react.lazy"),Ga=Symbol.iterator;function Tf(e){return e===null||typeof e!="object"?null:(e=Ga&&e[Ga]||e["@@iterator"],typeof e=="function"?e:null)}var Vc={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},Dc=Object.assign,Fc={};function cr(e,t,n){this.props=e,this.context=t,this.refs=Fc,this.updater=n||Vc}cr.prototype.isReactComponent={};cr.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};cr.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function Uc(){}Uc.prototype=cr.prototype;function Kl(e,t,n){this.props=e,this.context=t,this.refs=Fc,this.updater=n||Vc}var Wl=Kl.prototype=new Uc;Wl.constructor=Kl;Dc(Wl,cr.prototype);Wl.isPureReactComponent=!0;var Qa=Array.isArray,jc=Object.prototype.hasOwnProperty,Yl={current:null},Mc={key:!0,ref:!0,__self:!0,__source:!0};function Ac(e,t,n){var r,o={},i=null,s=null;if(t!=null)for(r in t.ref!==void 0&&(s=t.ref),t.key!==void 0&&(i=""+t.key),t)jc.call(t,r)&&!Mc.hasOwnProperty(r)&&(o[r]=t[r]);var l=arguments.length-2;if(l===1)o.children=n;else if(1{const e={type:"size",height:document.body.offsetHeight};window.parent.postMessage(e)};new ResizeObserver($f).observe(document.body);function ve(e){return ve=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(t){return typeof t}:function(t){return t&&typeof Symbol=="function"&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},ve(e)}function dt(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function Bf(e,t){if(ve(e)!=="object"||e===null)return e;var n=e[Symbol.toPrimitive];if(n!==void 0){var r=n.call(e,t||"default");if(ve(r)!=="object")return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return(t==="string"?String:Number)(e)}function $c(e){var t=Bf(e,"string");return ve(t)==="symbol"?t:String(t)}function Ja(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n1&&arguments[1]!==void 0?arguments[1]:{};dt(this,e),this.init(t,n)}return pt(e,[{key:"init",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};this.prefix=r.prefix||"i18next:",this.logger=n||Wf,this.options=r,this.debug=r.debug}},{key:"setDebug",value:function(n){this.debug=n}},{key:"log",value:function(){for(var n=arguments.length,r=new Array(n),o=0;o1?r-1:0),i=1;i-1?l.replace(/###/g,"."):l}function o(){return!e||typeof e=="string"}for(var i=typeof t!="string"?[].concat(t):t.split(".");i.length>1;){if(o())return{};var s=r(i.shift());!e[s]&&n&&(e[s]=new n),Object.prototype.hasOwnProperty.call(e,s)?e=e[s]:e={}}return o()?{}:{obj:e,k:r(i.shift())}}function ru(e,t,n){var r=Gl(e,t,Object),o=r.obj,i=r.k;o[i]=n}function Gf(e,t,n,r){var o=Gl(e,t,Object),i=o.obj,s=o.k;i[s]=i[s]||[],r&&(i[s]=i[s].concat(n)),r||i[s].push(n)}function ai(e,t){var n=Gl(e,t),r=n.obj,o=n.k;if(r)return r[o]}function ou(e,t,n){var r=ai(e,n);return r!==void 0?r:ai(t,n)}function 
Wc(e,t,n){for(var r in t)r!=="__proto__"&&r!=="constructor"&&(r in e?typeof e[r]=="string"||e[r]instanceof String||typeof t[r]=="string"||t[r]instanceof String?n&&(e[r]=t[r]):Wc(e[r],t[r],n):e[r]=t[r]);return e}function bn(e){return e.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g,"\\$&")}var Qf={"&":"&","<":"<",">":">",'"':""","'":"'","/":"/"};function Xf(e){return typeof e=="string"?e.replace(/[&<>"'\/]/g,function(t){return Qf[t]}):e}var Ui=typeof window<"u"&&window.navigator&&typeof window.navigator.userAgentData>"u"&&window.navigator.userAgent&&window.navigator.userAgent.indexOf("MSIE")>-1,Jf=[" ",",","?","!",";"];function Zf(e,t,n){t=t||"",n=n||"";var r=Jf.filter(function(l){return t.indexOf(l)<0&&n.indexOf(l)<0});if(r.length===0)return!0;var o=new RegExp("(".concat(r.map(function(l){return l==="?"?"\\?":l}).join("|"),")")),i=!o.test(e);if(!i){var s=e.indexOf(n);s>0&&!o.test(e.substring(0,s))&&(i=!0)}return i}function iu(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function yo(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function Yc(e,t){var n=arguments.length>2&&arguments[2]!==void 0?arguments[2]:".";if(e){if(e[t])return e[t];for(var r=t.split(n),o=e,i=0;ii+s;)s++,l=r.slice(i,i+s).join(n),a=o[l];if(a===void 0)return;if(a===null)return null;if(t.endsWith(l)){if(typeof a=="string")return a;if(l&&typeof a[l]=="string")return a[l]}var u=r.slice(i+s).join(n);return u?Yc(a,u,n):void 0}o=o[r[i]]}return o}}var nm=function(e){Fi(n,e);var t=em(n);function n(r){var o,i=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{ns:["translation"],defaultNS:"translation"};return dt(this,n),o=t.call(this),Ui&&sn.call(Qt(o)),o.data=r||{},o.options=i,o.options.keySeparator===void 0&&(o.options.keySeparator="."),o.options.ignoreJSONStructure===void 0&&(o.options.ignoreJSONStructure=!0),o}return pt(n,[{key:"addNamespaces",value:function(o){this.options.ns.indexOf(o)<0&&this.options.ns.push(o)}},{key:"removeNamespaces",value:function(o){var i=this.options.ns.indexOf(o);i>-1&&this.options.ns.splice(i,1)}},{key:"getResource",value:function(o,i,s){var l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{},a=l.keySeparator!==void 0?l.keySeparator:this.options.keySeparator,u=l.ignoreJSONStructure!==void 0?l.ignoreJSONStructure:this.options.ignoreJSONStructure,f=[o,i];s&&typeof s!="string"&&(f=f.concat(s)),s&&typeof s=="string"&&(f=f.concat(a?s.split(a):s)),o.indexOf(".")>-1&&(f=o.split("."));var d=ai(this.data,f);return d||!u||typeof s!="string"?d:Yc(this.data&&this.data[o]&&this.data[o][i],s,a)}},{key:"addResource",value:function(o,i,s,l){var a=arguments.length>4&&arguments[4]!==void 0?arguments[4]:{silent:!1},u=this.options.keySeparator;u===void 0&&(u=".");var f=[o,i];s&&(f=f.concat(u?s.split(u):s)),o.indexOf(".")>-1&&(f=o.split("."),l=i,i=f[1]),this.addNamespaces(i),ru(this.data,f,l),a.silent||this.emit("added",o,i,s,l)}},{key:"addResources",value:function(o,i,s){var l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{silent:!1};for(var a in s)(typeof s[a]=="string"||Object.prototype.toString.apply(s[a])==="[object Array]")&&this.addResource(o,i,a,s[a],{silent:!0});l.silent||this.emit("added",o,i,s)}},{key:"addResourceBundle",value:function(o,i,s,l,a){var 
u=arguments.length>5&&arguments[5]!==void 0?arguments[5]:{silent:!1},f=[o,i];o.indexOf(".")>-1&&(f=o.split("."),l=s,s=i,i=f[1]),this.addNamespaces(i);var d=ai(this.data,f)||{};l?Wc(d,s,a):d=yo(yo({},d),s),ru(this.data,f,d),u.silent||this.emit("added",o,i,s)}},{key:"removeResourceBundle",value:function(o,i){this.hasResourceBundle(o,i)&&delete this.data[o][i],this.removeNamespaces(i),this.emit("removed",o,i)}},{key:"hasResourceBundle",value:function(o,i){return this.getResource(o,i)!==void 0}},{key:"getResourceBundle",value:function(o,i){return i||(i=this.options.defaultNS),this.options.compatibilityAPI==="v1"?yo(yo({},{}),this.getResource(o,i)):this.getResource(o,i)}},{key:"getDataByLanguage",value:function(o){return this.data[o]}},{key:"hasLanguageSomeTranslations",value:function(o){var i=this.getDataByLanguage(o),s=i&&Object.keys(i)||[];return!!s.find(function(l){return i[l]&&Object.keys(i[l]).length>0})}},{key:"toJSON",value:function(){return this.data}}]),n}(sn),qc={processors:{},addPostProcessor:function(t){this.processors[t.name]=t},handle:function(t,n,r,o,i){var s=this;return t.forEach(function(l){s.processors[l]&&(n=s.processors[l].process(n,r,o,i))}),n}};function su(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function Ne(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}var lu={},au=function(e){Fi(n,e);var t=rm(n);function n(r){var o,i=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};return dt(this,n),o=t.call(this),Ui&&sn.call(Qt(o)),qf(["resourceStore","languageUtils","pluralResolver","interpolator","backendConnector","i18nFormat","utils"],r,Qt(o)),o.options=i,o.options.keySeparator===void 0&&(o.options.keySeparator="."),o.logger=wt.create("translator"),o}return pt(n,[{key:"changeLanguage",value:function(o){o&&(this.language=o)}},{key:"exists",value:function(o){var i=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{interpolation:{}};if(o==null)return!1;var s=this.resolve(o,i);return s&&s.res!==void 0}},{key:"extractFromKey",value:function(o,i){var s=i.nsSeparator!==void 0?i.nsSeparator:this.options.nsSeparator;s===void 0&&(s=":");var l=i.keySeparator!==void 0?i.keySeparator:this.options.keySeparator,a=i.ns||this.options.defaultNS||[],u=s&&o.indexOf(s)>-1,f=!this.options.userDefinedKeySeparator&&!i.keySeparator&&!this.options.userDefinedNsSeparator&&!i.nsSeparator&&!Zf(o,s,l);if(u&&!f){var d=o.match(this.interpolator.nestingRegexp);if(d&&d.length>0)return{key:o,namespaces:a};var m=o.split(s);(s!==l||s===l&&this.options.ns.indexOf(m[0])>-1)&&(a=m.shift()),o=m.join(l)}return typeof a=="string"&&(a=[a]),{key:o,namespaces:a}}},{key:"translate",value:function(o,i,s){var l=this;if(ve(i)!=="object"&&this.options.overloadTranslationOptionHandler&&(i=this.options.overloadTranslationOptionHandler(arguments)),i||(i={}),o==null)return"";Array.isArray(o)||(o=[String(o)]);var a=i.returnDetails!==void 0?i.returnDetails:this.options.returnDetails,u=i.keySeparator!==void 0?i.keySeparator:this.options.keySeparator,f=this.extractFromKey(o[o.length-1],i),d=f.key,m=f.namespaces,g=m[m.length-1],v=i.lng||this.language,k=i.appendNamespaceToCIMode||this.options.appendNamespaceToCIMode;if(v&&v.toLowerCase()==="cimode"){if(k){var 
O=i.nsSeparator||this.options.nsSeparator;return a?{res:"".concat(g).concat(O).concat(d),usedKey:d,exactUsedKey:d,usedLng:v,usedNS:g}:"".concat(g).concat(O).concat(d)}return a?{res:d,usedKey:d,exactUsedKey:d,usedLng:v,usedNS:g}:d}var p=this.resolve(o,i),c=p&&p.res,h=p&&p.usedKey||d,y=p&&p.exactUsedKey||d,S=Object.prototype.toString.apply(c),w=["[object Number]","[object Function]","[object RegExp]"],P=i.joinArrays!==void 0?i.joinArrays:this.options.joinArrays,R=!this.i18nFormat||this.i18nFormat.handleAsObject,D=typeof c!="string"&&typeof c!="boolean"&&typeof c!="number";if(R&&c&&D&&w.indexOf(S)<0&&!(typeof P=="string"&&S==="[object Array]")){if(!i.returnObjects&&!this.options.returnObjects){this.options.returnedObjectHandler||this.logger.warn("accessing an object - but returnObjects options is not enabled!");var x=this.options.returnedObjectHandler?this.options.returnedObjectHandler(h,c,Ne(Ne({},i),{},{ns:m})):"key '".concat(d," (").concat(this.language,")' returned an object instead of string.");return a?(p.res=x,p):x}if(u){var K=S==="[object Array]",Te=K?[]:{},xt=K?y:h;for(var tt in c)if(Object.prototype.hasOwnProperty.call(c,tt)){var Tn="".concat(xt).concat(u).concat(tt);Te[tt]=this.translate(Tn,Ne(Ne({},i),{joinArrays:!1,ns:m})),Te[tt]===Tn&&(Te[tt]=c[tt])}c=Te}}else if(R&&typeof P=="string"&&S==="[object Array]")c=c.join(P),c&&(c=this.extendTranslation(c,o,i,s));else{var gt=!1,nt=!1,C=i.count!==void 0&&typeof i.count!="string",V=n.hasDefaultValue(i),F=C?this.pluralResolver.getSuffix(v,i.count,i):"",A=i["defaultValue".concat(F)]||i.defaultValue;!this.isValidLookup(c)&&V&&(gt=!0,c=A),this.isValidLookup(c)||(nt=!0,c=d);var X=i.missingKeyNoValueFallbackToKey||this.options.missingKeyNoValueFallbackToKey,Ct=X&&nt?void 0:c,je=V&&A!==c&&this.options.updateMissing;if(nt||gt||je){if(this.logger.log(je?"updateKey":"missingKey",v,g,d,je?A:c),u){var Ln=this.resolve(d,Ne(Ne({},i),{},{keySeparator:!1}));Ln&&Ln.res&&this.logger.warn("Seems the loaded translations were in flat JSON format instead of nested. Either set keySeparator: false on init or make sure your translations are published in nested format.")}var Me=[],Tt=this.languageUtils.getFallbackCodes(this.options.fallbackLng,i.lng||this.language);if(this.options.saveMissingTo==="fallback"&&Tt&&Tt[0])for(var Zi=0;Zi1&&arguments[1]!==void 0?arguments[1]:{},l,a,u,f,d;return typeof o=="string"&&(o=[o]),o.forEach(function(m){if(!i.isValidLookup(l)){var g=i.extractFromKey(m,s),v=g.key;a=v;var k=g.namespaces;i.options.fallbackNS&&(k=k.concat(i.options.fallbackNS));var O=s.count!==void 0&&typeof s.count!="string",p=O&&!s.ordinal&&s.count===0&&i.pluralResolver.shouldUseIntlApi(),c=s.context!==void 0&&(typeof s.context=="string"||typeof s.context=="number")&&s.context!=="",h=s.lngs?s.lngs:i.languageUtils.toResolveHierarchy(s.lng||i.language,s.fallbackLng);k.forEach(function(y){i.isValidLookup(l)||(d=y,!lu["".concat(h[0],"-").concat(y)]&&i.utils&&i.utils.hasLoadedNamespace&&!i.utils.hasLoadedNamespace(d)&&(lu["".concat(h[0],"-").concat(y)]=!0,i.logger.warn('key "'.concat(a,'" for languages "').concat(h.join(", "),`" won't get resolved as namespace "`).concat(d,'" was not yet loaded'),"This means something IS WRONG in your setup. You access the t function before i18next.init / i18next.loadNamespace / i18next.changeLanguage was done. 
Wait for the callback or Promise to resolve before accessing it!!!")),h.forEach(function(S){if(!i.isValidLookup(l)){f=S;var w=[v];if(i.i18nFormat&&i.i18nFormat.addLookupKeys)i.i18nFormat.addLookupKeys(w,v,S,y,s);else{var P;O&&(P=i.pluralResolver.getSuffix(S,s.count,s));var R="".concat(i.options.pluralSeparator,"zero");if(O&&(w.push(v+P),p&&w.push(v+R)),c){var D="".concat(v).concat(i.options.contextSeparator).concat(s.context);w.push(D),O&&(w.push(D+P),p&&w.push(D+R))}}for(var x;x=w.pop();)i.isValidLookup(l)||(u=x,l=i.getResource(S,y,x,s))}}))})}}),{res:l,usedKey:a,exactUsedKey:u,usedLng:f,usedNS:d}}},{key:"isValidLookup",value:function(o){return o!==void 0&&!(!this.options.returnNull&&o===null)&&!(!this.options.returnEmptyString&&o==="")}},{key:"getResource",value:function(o,i,s){var l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{};return this.i18nFormat&&this.i18nFormat.getResource?this.i18nFormat.getResource(o,i,s,l):this.resourceStore.getResource(o,i,s,l)}}],[{key:"hasDefaultValue",value:function(o){var i="defaultValue";for(var s in o)if(Object.prototype.hasOwnProperty.call(o,s)&&i===s.substring(0,i.length)&&o[s]!==void 0)return!0;return!1}}]),n}(sn);function rs(e){return e.charAt(0).toUpperCase()+e.slice(1)}var uu=function(){function e(t){dt(this,e),this.options=t,this.supportedLngs=this.options.supportedLngs||!1,this.logger=wt.create("languageUtils")}return pt(e,[{key:"getScriptPartFromCode",value:function(n){if(!n||n.indexOf("-")<0)return null;var r=n.split("-");return r.length===2||(r.pop(),r[r.length-1].toLowerCase()==="x")?null:this.formatLanguageCode(r.join("-"))}},{key:"getLanguagePartFromCode",value:function(n){if(!n||n.indexOf("-")<0)return n;var r=n.split("-");return this.formatLanguageCode(r[0])}},{key:"formatLanguageCode",value:function(n){if(typeof n=="string"&&n.indexOf("-")>-1){var r=["hans","hant","latn","cyrl","cans","mong","arab"],o=n.split("-");return this.options.lowerCaseLng?o=o.map(function(i){return i.toLowerCase()}):o.length===2?(o[0]=o[0].toLowerCase(),o[1]=o[1].toUpperCase(),r.indexOf(o[1].toLowerCase())>-1&&(o[1]=rs(o[1].toLowerCase()))):o.length===3&&(o[0]=o[0].toLowerCase(),o[1].length===2&&(o[1]=o[1].toUpperCase()),o[0]!=="sgn"&&o[2].length===2&&(o[2]=o[2].toUpperCase()),r.indexOf(o[1].toLowerCase())>-1&&(o[1]=rs(o[1].toLowerCase())),r.indexOf(o[2].toLowerCase())>-1&&(o[2]=rs(o[2].toLowerCase()))),o.join("-")}return this.options.cleanCode||this.options.lowerCaseLng?n.toLowerCase():n}},{key:"isSupportedCode",value:function(n){return(this.options.load==="languageOnly"||this.options.nonExplicitSupportedLngs)&&(n=this.getLanguagePartFromCode(n)),!this.supportedLngs||!this.supportedLngs.length||this.supportedLngs.indexOf(n)>-1}},{key:"getBestMatchFromCodes",value:function(n){var r=this;if(!n)return null;var o;return n.forEach(function(i){if(!o){var s=r.formatLanguageCode(i);(!r.options.supportedLngs||r.isSupportedCode(s))&&(o=s)}}),!o&&this.options.supportedLngs&&n.forEach(function(i){if(!o){var s=r.getLanguagePartFromCode(i);if(r.isSupportedCode(s))return o=s;o=r.options.supportedLngs.find(function(l){if(l.indexOf(s)===0)return l})}}),o||(o=this.getFallbackCodes(this.options.fallbackLng)[0]),o}},{key:"getFallbackCodes",value:function(n,r){if(!n)return[];if(typeof n=="function"&&(n=n(r)),typeof n=="string"&&(n=[n]),Object.prototype.toString.apply(n)==="[object Array]")return n;if(!r)return n.default||[];var o=n[r];return 
o||(o=n[this.getScriptPartFromCode(r)]),o||(o=n[this.formatLanguageCode(r)]),o||(o=n[this.getLanguagePartFromCode(r)]),o||(o=n.default),o||[]}},{key:"toResolveHierarchy",value:function(n,r){var o=this,i=this.getFallbackCodes(r||this.options.fallbackLng||[],n),s=[],l=function(u){u&&(o.isSupportedCode(u)?s.push(u):o.logger.warn("rejecting language code not found in supportedLngs: ".concat(u)))};return typeof n=="string"&&n.indexOf("-")>-1?(this.options.load!=="languageOnly"&&l(this.formatLanguageCode(n)),this.options.load!=="languageOnly"&&this.options.load!=="currentOnly"&&l(this.getScriptPartFromCode(n)),this.options.load!=="currentOnly"&&l(this.getLanguagePartFromCode(n))):typeof n=="string"&&l(this.formatLanguageCode(n)),i.forEach(function(a){s.indexOf(a)<0&&l(o.formatLanguageCode(a))}),s}}]),e}(),im=[{lngs:["ach","ak","am","arn","br","fil","gun","ln","mfe","mg","mi","oc","pt","pt-BR","tg","tl","ti","tr","uz","wa"],nr:[1,2],fc:1},{lngs:["af","an","ast","az","bg","bn","ca","da","de","dev","el","en","eo","es","et","eu","fi","fo","fur","fy","gl","gu","ha","hi","hu","hy","ia","it","kk","kn","ku","lb","mai","ml","mn","mr","nah","nap","nb","ne","nl","nn","no","nso","pa","pap","pms","ps","pt-PT","rm","sco","se","si","so","son","sq","sv","sw","ta","te","tk","ur","yo"],nr:[1,2],fc:2},{lngs:["ay","bo","cgg","fa","ht","id","ja","jbo","ka","km","ko","ky","lo","ms","sah","su","th","tt","ug","vi","wo","zh"],nr:[1],fc:3},{lngs:["be","bs","cnr","dz","hr","ru","sr","uk"],nr:[1,2,5],fc:4},{lngs:["ar"],nr:[0,1,2,3,11,100],fc:5},{lngs:["cs","sk"],nr:[1,2,5],fc:6},{lngs:["csb","pl"],nr:[1,2,5],fc:7},{lngs:["cy"],nr:[1,2,3,8],fc:8},{lngs:["fr"],nr:[1,2],fc:9},{lngs:["ga"],nr:[1,2,3,7,11],fc:10},{lngs:["gd"],nr:[1,2,3,20],fc:11},{lngs:["is"],nr:[1,2],fc:12},{lngs:["jv"],nr:[0,1],fc:13},{lngs:["kw"],nr:[1,2,3,4],fc:14},{lngs:["lt"],nr:[1,2,10],fc:15},{lngs:["lv"],nr:[1,2,0],fc:16},{lngs:["mk"],nr:[1,2],fc:17},{lngs:["mnk"],nr:[0,1,2],fc:18},{lngs:["mt"],nr:[1,2,11,20],fc:19},{lngs:["or"],nr:[2,1],fc:2},{lngs:["ro"],nr:[1,2,20],fc:20},{lngs:["sl"],nr:[5,1,2,3],fc:21},{lngs:["he","iw"],nr:[1,2,20,21],fc:22}],sm={1:function(t){return+(t>1)},2:function(t){return+(t!=1)},3:function(t){return 0},4:function(t){return t%10==1&&t%100!=11?0:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?1:2},5:function(t){return t==0?0:t==1?1:t==2?2:t%100>=3&&t%100<=10?3:t%100>=11?4:5},6:function(t){return t==1?0:t>=2&&t<=4?1:2},7:function(t){return t==1?0:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?1:2},8:function(t){return t==1?0:t==2?1:t!=8&&t!=11?2:3},9:function(t){return+(t>=2)},10:function(t){return t==1?0:t==2?1:t<7?2:t<11?3:4},11:function(t){return t==1||t==11?0:t==2||t==12?1:t>2&&t<20?2:3},12:function(t){return+(t%10!=1||t%100==11)},13:function(t){return+(t!==0)},14:function(t){return t==1?0:t==2?1:t==3?2:3},15:function(t){return t%10==1&&t%100!=11?0:t%10>=2&&(t%100<10||t%100>=20)?1:2},16:function(t){return t%10==1&&t%100!=11?0:t!==0?1:2},17:function(t){return t==1||t%10==1&&t%100!=11?0:1},18:function(t){return t==0?0:t==1?1:2},19:function(t){return t==1?0:t==0||t%100>1&&t%100<11?1:t%100>10&&t%100<20?2:3},20:function(t){return t==1?0:t==0||t%100>0&&t%100<20?1:2},21:function(t){return t%100==1?1:t%100==2?2:t%100==3||t%100==4?3:0},22:function(t){return t==1?0:t==2?1:(t<0||t>10)&&t%10==0?2:3}},lm=["v1","v2","v3"],cu={zero:0,one:1,two:2,few:3,many:4,other:5};function am(){var e={};return im.forEach(function(t){t.lngs.forEach(function(n){e[n]={numbers:t.nr,plurals:sm[t.fc]}})}),e}var um=function(){function e(t){var 
n=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};dt(this,e),this.languageUtils=t,this.options=n,this.logger=wt.create("pluralResolver"),(!this.options.compatibilityJSON||this.options.compatibilityJSON==="v4")&&(typeof Intl>"u"||!Intl.PluralRules)&&(this.options.compatibilityJSON="v3",this.logger.error("Your environment seems not to be Intl API compatible, use an Intl.PluralRules polyfill. Will fallback to the compatibilityJSON v3 format handling.")),this.rules=am()}return pt(e,[{key:"addRule",value:function(n,r){this.rules[n]=r}},{key:"getRule",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};if(this.shouldUseIntlApi())try{return new Intl.PluralRules(n,{type:r.ordinal?"ordinal":"cardinal"})}catch{return}return this.rules[n]||this.rules[this.languageUtils.getLanguagePartFromCode(n)]}},{key:"needsPlural",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},o=this.getRule(n,r);return this.shouldUseIntlApi()?o&&o.resolvedOptions().pluralCategories.length>1:o&&o.numbers.length>1}},{key:"getPluralFormsOfKey",value:function(n,r){var o=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{};return this.getSuffixes(n,o).map(function(i){return"".concat(r).concat(i)})}},{key:"getSuffixes",value:function(n){var r=this,o=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},i=this.getRule(n,o);return i?this.shouldUseIntlApi()?i.resolvedOptions().pluralCategories.sort(function(s,l){return cu[s]-cu[l]}).map(function(s){return"".concat(r.options.prepend).concat(s)}):i.numbers.map(function(s){return r.getSuffix(n,s,o)}):[]}},{key:"getSuffix",value:function(n,r){var o=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{},i=this.getRule(n,o);return i?this.shouldUseIntlApi()?"".concat(this.options.prepend).concat(i.select(r)):this.getSuffixRetroCompatible(i,r):(this.logger.warn("no plural rule found for: ".concat(n)),"")}},{key:"getSuffixRetroCompatible",value:function(n,r){var o=this,i=n.noAbs?n.plurals(r):n.plurals(Math.abs(r)),s=n.numbers[i];this.options.simplifyPluralSuffix&&n.numbers.length===2&&n.numbers[0]===1&&(s===2?s="plural":s===1&&(s=""));var l=function(){return o.options.prepend&&s.toString()?o.options.prepend+s.toString():s.toString()};return this.options.compatibilityJSON==="v1"?s===1?"":typeof s=="number"?"_plural_".concat(s.toString()):l():this.options.compatibilityJSON==="v2"||this.options.simplifyPluralSuffix&&n.numbers.length===2&&n.numbers[0]===1?l():this.options.prepend&&i.toString()?this.options.prepend+i.toString():i.toString()}},{key:"shouldUseIntlApi",value:function(){return!lm.includes(this.options.compatibilityJSON)}}]),e}();function du(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function rt(e){for(var t=1;t0&&arguments[0]!==void 0?arguments[0]:{};dt(this,e),this.logger=wt.create("interpolator"),this.options=t,this.format=t.interpolation&&t.interpolation.format||function(n){return n},this.init(t)}return pt(e,[{key:"init",value:function(){var n=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};n.interpolation||(n.interpolation={escapeValue:!0});var r=n.interpolation;this.escape=r.escape!==void 0?r.escape:Xf,this.escapeValue=r.escapeValue!==void 0?r.escapeValue:!0,this.useRawValueToEscape=r.useRawValueToEscape!==void 
0?r.useRawValueToEscape:!1,this.prefix=r.prefix?bn(r.prefix):r.prefixEscaped||"{{",this.suffix=r.suffix?bn(r.suffix):r.suffixEscaped||"}}",this.formatSeparator=r.formatSeparator?r.formatSeparator:r.formatSeparator||",",this.unescapePrefix=r.unescapeSuffix?"":r.unescapePrefix||"-",this.unescapeSuffix=this.unescapePrefix?"":r.unescapeSuffix||"",this.nestingPrefix=r.nestingPrefix?bn(r.nestingPrefix):r.nestingPrefixEscaped||bn("$t("),this.nestingSuffix=r.nestingSuffix?bn(r.nestingSuffix):r.nestingSuffixEscaped||bn(")"),this.nestingOptionsSeparator=r.nestingOptionsSeparator?r.nestingOptionsSeparator:r.nestingOptionsSeparator||",",this.maxReplaces=r.maxReplaces?r.maxReplaces:1e3,this.alwaysFormat=r.alwaysFormat!==void 0?r.alwaysFormat:!1,this.resetRegExp()}},{key:"reset",value:function(){this.options&&this.init(this.options)}},{key:"resetRegExp",value:function(){var n="".concat(this.prefix,"(.+?)").concat(this.suffix);this.regexp=new RegExp(n,"g");var r="".concat(this.prefix).concat(this.unescapePrefix,"(.+?)").concat(this.unescapeSuffix).concat(this.suffix);this.regexpUnescape=new RegExp(r,"g");var o="".concat(this.nestingPrefix,"(.+?)").concat(this.nestingSuffix);this.nestingRegexp=new RegExp(o,"g")}},{key:"interpolate",value:function(n,r,o,i){var s=this,l,a,u,f=this.options&&this.options.interpolation&&this.options.interpolation.defaultVariables||{};function d(O){return O.replace(/\$/g,"$$$$")}var m=function(p){if(p.indexOf(s.formatSeparator)<0){var c=ou(r,f,p);return s.alwaysFormat?s.format(c,void 0,o,rt(rt(rt({},i),r),{},{interpolationkey:p})):c}var h=p.split(s.formatSeparator),y=h.shift().trim(),S=h.join(s.formatSeparator).trim();return s.format(ou(r,f,y),S,o,rt(rt(rt({},i),r),{},{interpolationkey:y}))};this.resetRegExp();var g=i&&i.missingInterpolationHandler||this.options.missingInterpolationHandler,v=i&&i.interpolation&&i.interpolation.skipOnVariables!==void 0?i.interpolation.skipOnVariables:this.options.interpolation.skipOnVariables,k=[{regex:this.regexpUnescape,safeValue:function(p){return d(p)}},{regex:this.regexp,safeValue:function(p){return s.escapeValue?d(s.escape(p)):d(p)}}];return k.forEach(function(O){for(u=0;l=O.regex.exec(n);){var p=l[1].trim();if(a=m(p),a===void 0)if(typeof g=="function"){var c=g(n,l,i);a=typeof c=="string"?c:""}else if(i&&Object.prototype.hasOwnProperty.call(i,p))a="";else if(v){a=l[0];continue}else s.logger.warn("missed to pass in variable ".concat(p," for interpolating ").concat(n)),a="";else typeof a!="string"&&!s.useRawValueToEscape&&(a=nu(a));var h=O.safeValue(a);if(n=n.replace(l[0],h),v?(O.regex.lastIndex+=a.length,O.regex.lastIndex-=l[0].length):O.regex.lastIndex=0,u++,u>=s.maxReplaces)break}}),n}},{key:"nest",value:function(n,r){var o=this,i=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{},s,l,a;function u(g,v){var k=this.nestingOptionsSeparator;if(g.indexOf(k)<0)return g;var O=g.split(new RegExp("".concat(k,"[ ]*{"))),p="{".concat(O[1]);g=O[0],p=this.interpolate(p,a);var c=p.match(/'/g),h=p.match(/"/g);(c&&c.length%2===0&&!h||h.length%2!==0)&&(p=p.replace(/'/g,'"'));try{a=JSON.parse(p),v&&(a=rt(rt({},v),a))}catch(y){return this.logger.warn("failed parsing options string in nesting for key ".concat(g),y),"".concat(g).concat(k).concat(p)}return delete a.defaultValue,g}for(;s=this.nestingRegexp.exec(n);){var f=[];a=rt({},i),a=a.replace&&typeof a.replace!="string"?a.replace:a,a.applyPostProcessor=!1,delete a.defaultValue;var d=!1;if(s[0].indexOf(this.formatSeparator)!==-1&&!/{.*}/.test(s[1])){var 
m=s[1].split(this.formatSeparator).map(function(g){return g.trim()});s[1]=m.shift(),f=m,d=!0}if(l=r(u.call(this,s[1].trim(),a),a),l&&s[0]===n&&typeof l!="string")return l;typeof l!="string"&&(l=nu(l)),l||(this.logger.warn("missed to resolve ".concat(s[1]," for nesting ").concat(n)),l=""),d&&(l=f.reduce(function(g,v){return o.format(g,v,i.lng,rt(rt({},i),{},{interpolationkey:s[1].trim()}))},l.trim())),n=n.replace(s[0],l),this.regexp.lastIndex=0}return n}}]),e}();function pu(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function Lt(e){for(var t=1;t-1){var r=e.split("(");t=r[0].toLowerCase().trim();var o=r[1].substring(0,r[1].length-1);if(t==="currency"&&o.indexOf(":")<0)n.currency||(n.currency=o.trim());else if(t==="relativetime"&&o.indexOf(":")<0)n.range||(n.range=o.trim());else{var i=o.split(";");i.forEach(function(s){if(s){var l=s.split(":"),a=Kf(l),u=a[0],f=a.slice(1),d=f.join(":").trim().replace(/^'+|'+$/g,"");n[u.trim()]||(n[u.trim()]=d),d==="false"&&(n[u.trim()]=!1),d==="true"&&(n[u.trim()]=!0),isNaN(d)||(n[u.trim()]=parseInt(d,10))}})}}return{formatName:t,formatOptions:n}}function Vn(e){var t={};return function(r,o,i){var s=o+JSON.stringify(i),l=t[s];return l||(l=e(o,i),t[s]=l),l(r)}}var pm=function(){function e(){var t=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};dt(this,e),this.logger=wt.create("formatter"),this.options=t,this.formats={number:Vn(function(n,r){var o=new Intl.NumberFormat(n,Lt({},r));return function(i){return o.format(i)}}),currency:Vn(function(n,r){var o=new Intl.NumberFormat(n,Lt(Lt({},r),{},{style:"currency"}));return function(i){return o.format(i)}}),datetime:Vn(function(n,r){var o=new Intl.DateTimeFormat(n,Lt({},r));return function(i){return o.format(i)}}),relativetime:Vn(function(n,r){var o=new Intl.RelativeTimeFormat(n,Lt({},r));return function(i){return o.format(i,r.range||"day")}}),list:Vn(function(n,r){var o=new Intl.ListFormat(n,Lt({},r));return function(i){return o.format(i)}})},this.init(t)}return pt(e,[{key:"init",value:function(n){var r=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{interpolation:{}},o=r.interpolation;this.formatSeparator=o.formatSeparator?o.formatSeparator:o.formatSeparator||","}},{key:"add",value:function(n,r){this.formats[n.toLowerCase().trim()]=r}},{key:"addCached",value:function(n,r){this.formats[n.toLowerCase().trim()]=Vn(r)}},{key:"format",value:function(n,r,o){var i=this,s=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{},l=r.split(this.formatSeparator),a=l.reduce(function(u,f){var d=dm(f),m=d.formatName,g=d.formatOptions;if(i.formats[m]){var v=u;try{var k=s&&s.formatParams&&s.formatParams[s.interpolationkey]||{},O=k.locale||k.lng||s.locale||s.lng||o;v=i.formats[m](u,O,Lt(Lt(Lt({},g),s),k))}catch(p){i.logger.warn(p)}return v}else i.logger.warn("there was no format function for ".concat(m));return u},n);return a}}]),e}();function fu(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function mu(e){for(var t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function hm(e,t){e.pending[t]!==void 0&&(delete 
e.pending[t],e.pendingCount--)}var gm=function(e){Fi(n,e);var t=fm(n);function n(r,o,i){var s,l=arguments.length>3&&arguments[3]!==void 0?arguments[3]:{};return dt(this,n),s=t.call(this),Ui&&sn.call(Qt(s)),s.backend=r,s.store=o,s.services=i,s.languageUtils=i.languageUtils,s.options=l,s.logger=wt.create("backendConnector"),s.waitingReads=[],s.maxParallelReads=l.maxParallelReads||10,s.readingCalls=0,s.maxRetries=l.maxRetries>=0?l.maxRetries:5,s.retryTimeout=l.retryTimeout>=1?l.retryTimeout:350,s.state={},s.queue=[],s.backend&&s.backend.init&&s.backend.init(i,l.backend,l),s}return pt(n,[{key:"queueLoad",value:function(o,i,s,l){var a=this,u={},f={},d={},m={};return o.forEach(function(g){var v=!0;i.forEach(function(k){var O="".concat(g,"|").concat(k);!s.reload&&a.store.hasResourceBundle(g,k)?a.state[O]=2:a.state[O]<0||(a.state[O]===1?f[O]===void 0&&(f[O]=!0):(a.state[O]=1,v=!1,f[O]===void 0&&(f[O]=!0),u[O]===void 0&&(u[O]=!0),m[k]===void 0&&(m[k]=!0)))}),v||(d[g]=!0)}),(Object.keys(u).length||Object.keys(f).length)&&this.queue.push({pending:f,pendingCount:Object.keys(f).length,loaded:{},errors:[],callback:l}),{toLoad:Object.keys(u),pending:Object.keys(f),toLoadLanguages:Object.keys(d),toLoadNamespaces:Object.keys(m)}}},{key:"loaded",value:function(o,i,s){var l=o.split("|"),a=l[0],u=l[1];i&&this.emit("failedLoading",a,u,i),s&&this.store.addResourceBundle(a,u,s),this.state[o]=i?-1:2;var f={};this.queue.forEach(function(d){Gf(d.loaded,[a],u),hm(d,o),i&&d.errors.push(i),d.pendingCount===0&&!d.done&&(Object.keys(d.loaded).forEach(function(m){f[m]||(f[m]={});var g=d.loaded[m];g.length&&g.forEach(function(v){f[m][v]===void 0&&(f[m][v]=!0)})}),d.done=!0,d.errors.length?d.callback(d.errors):d.callback())}),this.emit("loaded",f),this.queue=this.queue.filter(function(d){return!d.done})}},{key:"read",value:function(o,i,s){var l=this,a=arguments.length>3&&arguments[3]!==void 0?arguments[3]:0,u=arguments.length>4&&arguments[4]!==void 0?arguments[4]:this.retryTimeout,f=arguments.length>5?arguments[5]:void 0;if(!o.length)return f(null,{});if(this.readingCalls>=this.maxParallelReads){this.waitingReads.push({lng:o,ns:i,fcName:s,tried:a,wait:u,callback:f});return}this.readingCalls++;var d=function(k,O){if(l.readingCalls--,l.waitingReads.length>0){var p=l.waitingReads.shift();l.read(p.lng,p.ns,p.fcName,p.tried,p.wait,p.callback)}if(k&&O&&a2&&arguments[2]!==void 0?arguments[2]:{},a=arguments.length>3?arguments[3]:void 0;if(!this.backend)return this.logger.warn("No backend was added via i18next.use. 
Will not load resources."),a&&a();typeof o=="string"&&(o=this.languageUtils.toResolveHierarchy(o)),typeof i=="string"&&(i=[i]);var u=this.queueLoad(o,i,l,a);if(!u.toLoad.length)return u.pending.length||a(),null;u.toLoad.forEach(function(f){s.loadOne(f)})}},{key:"load",value:function(o,i,s){this.prepareLoading(o,i,{},s)}},{key:"reload",value:function(o,i,s){this.prepareLoading(o,i,{reload:!0},s)}},{key:"loadOne",value:function(o){var i=this,s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:"",l=o.split("|"),a=l[0],u=l[1];this.read(a,u,"read",void 0,void 0,function(f,d){f&&i.logger.warn("".concat(s,"loading namespace ").concat(u," for language ").concat(a," failed"),f),!f&&d&&i.logger.log("".concat(s,"loaded namespace ").concat(u," for language ").concat(a),d),i.loaded(o,f,d)})}},{key:"saveMissing",value:function(o,i,s,l,a){var u=arguments.length>5&&arguments[5]!==void 0?arguments[5]:{},f=arguments.length>6&&arguments[6]!==void 0?arguments[6]:function(){};if(this.services.utils&&this.services.utils.hasLoadedNamespace&&!this.services.utils.hasLoadedNamespace(i)){this.logger.warn('did not save key "'.concat(s,'" as the namespace "').concat(i,'" was not yet loaded'),"This means something IS WRONG in your setup. You access the t function before i18next.init / i18next.loadNamespace / i18next.changeLanguage was done. Wait for the callback or Promise to resolve before accessing it!!!");return}if(!(s==null||s==="")){if(this.backend&&this.backend.create){var d=mu(mu({},u),{},{isUpdate:a}),m=this.backend.create.bind(this.backend);if(m.length<6)try{var g;m.length===5?g=m(o,i,s,l,d):g=m(o,i,s,l),g&&typeof g.then=="function"?g.then(function(v){return f(null,v)}).catch(f):f(null,g)}catch(v){f(v)}else m(o,i,s,l,f,d)}!o||!o[0]||this.store.addResource(o[0],i,s,l)}}}]),n}(sn);function hu(){return{debug:!1,initImmediate:!0,ns:["translation"],defaultNS:["translation"],fallbackLng:["dev"],fallbackNS:!1,supportedLngs:!1,nonExplicitSupportedLngs:!1,load:"all",preload:!1,simplifyPluralSuffix:!0,keySeparator:".",nsSeparator:":",pluralSeparator:"_",contextSeparator:"_",partialBundledLanguages:!1,saveMissing:!1,updateMissing:!1,saveMissingTo:"fallback",saveMissingPlurals:!0,missingKeyHandler:!1,missingInterpolationHandler:!1,postProcess:!1,postProcessPassResolved:!1,returnNull:!0,returnEmptyString:!0,returnObjects:!1,joinArrays:!1,returnedObjectHandler:!1,parseMissingKeyHandler:!1,appendNamespaceToMissingKey:!1,appendNamespaceToCIMode:!1,overloadTranslationOptionHandler:function(t){var n={};if(ve(t[1])==="object"&&(n=t[1]),typeof t[1]=="string"&&(n.defaultValue=t[1]),typeof t[2]=="string"&&(n.tDescription=t[2]),ve(t[2])==="object"||ve(t[3])==="object"){var r=t[3]||t[2];Object.keys(r).forEach(function(o){n[o]=r[o]})}return n},interpolation:{escapeValue:!0,format:function(t,n,r,o){return t},prefix:"{{",suffix:"}}",formatSeparator:",",unescapePrefix:"-",nestingPrefix:"$t(",nestingSuffix:")",nestingOptionsSeparator:",",maxReplaces:1e3,skipOnVariables:!0}}}function gu(e){return typeof e.ns=="string"&&(e.ns=[e.ns]),typeof e.fallbackLng=="string"&&(e.fallbackLng=[e.fallbackLng]),typeof e.fallbackNS=="string"&&(e.fallbackNS=[e.fallbackNS]),e.supportedLngs&&e.supportedLngs.indexOf("cimode")<0&&(e.supportedLngs=e.supportedLngs.concat(["cimode"])),e}function vu(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(o){return Object.getOwnPropertyDescriptor(e,o).enumerable})),n.push.apply(n,r)}return n}function vt(e){for(var 
t=1;t"u"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch{return!1}}function _o(){}function _m(e){var t=Object.getOwnPropertyNames(Object.getPrototypeOf(e));t.forEach(function(n){typeof e[n]=="function"&&(e[n]=e[n].bind(e))})}var ui=function(e){Fi(n,e);var t=vm(n);function n(){var r,o=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},i=arguments.length>1?arguments[1]:void 0;if(dt(this,n),r=t.call(this),Ui&&sn.call(Qt(r)),r.options=gu(o),r.services={},r.logger=wt,r.modules={external:[]},_m(Qt(r)),i&&!r.isInitialized&&!o.isClone){if(!r.options.initImmediate)return r.init(o,i),co(r,Qt(r));setTimeout(function(){r.init(o,i)},0)}return r}return pt(n,[{key:"init",value:function(){var o=this,i=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},s=arguments.length>1?arguments[1]:void 0;typeof i=="function"&&(s=i,i={}),!i.defaultNS&&i.defaultNS!==!1&&i.ns&&(typeof i.ns=="string"?i.defaultNS=i.ns:i.ns.indexOf("translation")<0&&(i.defaultNS=i.ns[0]));var l=hu();this.options=vt(vt(vt({},l),this.options),gu(i)),this.options.compatibilityAPI!=="v1"&&(this.options.interpolation=vt(vt({},l.interpolation),this.options.interpolation)),i.keySeparator!==void 0&&(this.options.userDefinedKeySeparator=i.keySeparator),i.nsSeparator!==void 0&&(this.options.userDefinedNsSeparator=i.nsSeparator);function a(p){return p?typeof p=="function"?new p:p:null}if(!this.options.isClone){this.modules.logger?wt.init(a(this.modules.logger),this.options):wt.init(null,this.options);var u;this.modules.formatter?u=this.modules.formatter:typeof Intl<"u"&&(u=pm);var f=new uu(this.options);this.store=new nm(this.options.resources,this.options);var d=this.services;d.logger=wt,d.resourceStore=this.store,d.languageUtils=f,d.pluralResolver=new um(f,{prepend:this.options.pluralSeparator,compatibilityJSON:this.options.compatibilityJSON,simplifyPluralSuffix:this.options.simplifyPluralSuffix}),u&&(!this.options.interpolation.format||this.options.interpolation.format===l.interpolation.format)&&(d.formatter=a(u),d.formatter.init(d,this.options),this.options.interpolation.format=d.formatter.format.bind(d.formatter)),d.interpolator=new cm(this.options),d.utils={hasLoadedNamespace:this.hasLoadedNamespace.bind(this)},d.backendConnector=new gm(a(this.modules.backend),d.resourceStore,d,this.options),d.backendConnector.on("*",function(p){for(var c=arguments.length,h=new Array(c>1?c-1:0),y=1;y1?c-1:0),y=1;y0&&m[0]!=="dev"&&(this.options.lng=m[0])}!this.services.languageDetector&&!this.options.lng&&this.logger.warn("init: no languageDetector is used and no lng is defined");var g=["getResource","hasResourceBundle","getResourceBundle","getDataByLanguage"];g.forEach(function(p){o[p]=function(){var c;return(c=o.store)[p].apply(c,arguments)}});var v=["addResource","addResources","addResourceBundle","removeResourceBundle"];v.forEach(function(p){o[p]=function(){var c;return(c=o.store)[p].apply(c,arguments),o}});var k=yr(),O=function(){var c=function(y,S){o.isInitialized&&!o.initializedStoreOnce&&o.logger.warn("init: i18next is already initialized. 
You should call init just once!"),o.isInitialized=!0,o.options.isClone||o.logger.log("initialized",o.options),o.emit("initialized",o.options),k.resolve(S),s(y,S)};if(o.languages&&o.options.compatibilityAPI!=="v1"&&!o.isInitialized)return c(null,o.t.bind(o));o.changeLanguage(o.options.lng,c)};return this.options.resources||!this.options.initImmediate?O():setTimeout(O,0),k}},{key:"loadResources",value:function(o){var i=this,s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:_o,l=s,a=typeof o=="string"?o:this.language;if(typeof o=="function"&&(l=o),!this.options.resources||this.options.partialBundledLanguages){if(a&&a.toLowerCase()==="cimode")return l();var u=[],f=function(g){if(g){var v=i.services.languageUtils.toResolveHierarchy(g);v.forEach(function(k){u.indexOf(k)<0&&u.push(k)})}};if(a)f(a);else{var d=this.services.languageUtils.getFallbackCodes(this.options.fallbackLng);d.forEach(function(m){return f(m)})}this.options.preload&&this.options.preload.forEach(function(m){return f(m)}),this.services.backendConnector.load(u,this.options.ns,function(m){!m&&!i.resolvedLanguage&&i.language&&i.setResolvedLanguage(i.language),l(m)})}else l(null)}},{key:"reloadResources",value:function(o,i,s){var l=yr();return o||(o=this.languages),i||(i=this.options.ns),s||(s=_o),this.services.backendConnector.reload(o,i,function(a){l.resolve(),s(a)}),l}},{key:"use",value:function(o){if(!o)throw new Error("You are passing an undefined module! Please check the object you are passing to i18next.use()");if(!o.type)throw new Error("You are passing a wrong module! Please check the object you are passing to i18next.use()");return o.type==="backend"&&(this.modules.backend=o),(o.type==="logger"||o.log&&o.warn&&o.error)&&(this.modules.logger=o),o.type==="languageDetector"&&(this.modules.languageDetector=o),o.type==="i18nFormat"&&(this.modules.i18nFormat=o),o.type==="postProcessor"&&qc.addPostProcessor(o),o.type==="formatter"&&(this.modules.formatter=o),o.type==="3rdParty"&&this.modules.external.push(o),this}},{key:"setResolvedLanguage",value:function(o){if(!(!o||!this.languages)&&!(["cimode","dev"].indexOf(o)>-1))for(var i=0;i-1)&&this.store.hasLanguageSomeTranslations(s)){this.resolvedLanguage=s;break}}}},{key:"changeLanguage",value:function(o,i){var s=this;this.isLanguageChangingTo=o;var l=yr();this.emit("languageChanging",o);var a=function(m){s.language=m,s.languages=s.services.languageUtils.toResolveHierarchy(m),s.resolvedLanguage=void 0,s.setResolvedLanguage(m)},u=function(m,g){g?(a(g),s.translator.changeLanguage(g),s.isLanguageChangingTo=void 0,s.emit("languageChanged",g),s.logger.log("languageChanged",g)):s.isLanguageChangingTo=void 0,l.resolve(function(){return s.t.apply(s,arguments)}),i&&i(m,function(){return s.t.apply(s,arguments)})},f=function(m){!o&&!m&&s.services.languageDetector&&(m=[]);var g=typeof m=="string"?m:s.services.languageUtils.getBestMatchFromCodes(m);g&&(s.language||a(g),s.translator.language||s.translator.changeLanguage(g),s.services.languageDetector&&s.services.languageDetector.cacheUserLanguage&&s.services.languageDetector.cacheUserLanguage(g)),s.loadResources(g,function(v){u(v,g)})};return!o&&this.services.languageDetector&&!this.services.languageDetector.async?f(this.services.languageDetector.detect()):!o&&this.services.languageDetector&&this.services.languageDetector.async?this.services.languageDetector.detect.length===0?this.services.languageDetector.detect().then(f):this.services.languageDetector.detect(f):f(o),l}},{key:"getFixedT",value:function(o,i,s){var l=this,a=function 
u(f,d){var m;if(ve(d)!=="object"){for(var g=arguments.length,v=new Array(g>2?g-2:0),k=2;k1&&arguments[1]!==void 0?arguments[1]:{};if(!this.isInitialized)return this.logger.warn("hasLoadedNamespace: i18next was not initialized",this.languages),!1;if(!this.languages||!this.languages.length)return this.logger.warn("hasLoadedNamespace: i18n.languages were undefined or empty",this.languages),!1;var l=this.resolvedLanguage||this.languages[0],a=this.options?this.options.fallbackLng:!1,u=this.languages[this.languages.length-1];if(l.toLowerCase()==="cimode")return!0;var f=function(g,v){var k=i.services.backendConnector.state["".concat(g,"|").concat(v)];return k===-1||k===2};if(s.precheck){var d=s.precheck(this,f);if(d!==void 0)return d}return!!(this.hasResourceBundle(l,o)||!this.services.backendConnector.backend||this.options.resources&&!this.options.partialBundledLanguages||f(l,o)&&(!a||f(u,o)))}},{key:"loadNamespaces",value:function(o,i){var s=this,l=yr();return this.options.ns?(typeof o=="string"&&(o=[o]),o.forEach(function(a){s.options.ns.indexOf(a)<0&&s.options.ns.push(a)}),this.loadResources(function(a){l.resolve(),i&&i(a)}),l):(i&&i(),Promise.resolve())}},{key:"loadLanguages",value:function(o,i){var s=yr();typeof o=="string"&&(o=[o]);var l=this.options.preload||[],a=o.filter(function(u){return l.indexOf(u)<0});return a.length?(this.options.preload=l.concat(a),this.loadResources(function(u){s.resolve(),i&&i(u)}),s):(i&&i(),Promise.resolve())}},{key:"dir",value:function(o){if(o||(o=this.resolvedLanguage||(this.languages&&this.languages.length>0?this.languages[0]:this.language)),!o)return"rtl";var i=["ar","shu","sqr","ssh","xaa","yhd","yud","aao","abh","abv","acm","acq","acw","acx","acy","adf","ads","aeb","aec","afb","ajp","apc","apd","arb","arq","ars","ary","arz","auz","avl","ayh","ayl","ayn","ayp","bbz","pga","he","iw","ps","pbt","pbu","pst","prp","prd","ug","ur","ydd","yds","yih","ji","yi","hbo","men","xmn","fa","jpr","peo","pes","prs","dv","sam","ckb"],s=this.services&&this.services.languageUtils||new uu(hu());return i.indexOf(s.getLanguagePartFromCode(o))>-1||o.toLowerCase().indexOf("-arab")>1?"rtl":"ltr"}},{key:"cloneInstance",value:function(){var o=this,i=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},s=arguments.length>1&&arguments[1]!==void 0?arguments[1]:_o,l=vt(vt(vt({},this.options),i),{isClone:!0}),a=new n(l);(i.debug!==void 0||i.prefix!==void 0)&&(a.logger=a.logger.clone(i));var u=["store","services","language"];return u.forEach(function(f){a[f]=o[f]}),a.services=vt({},this.services),a.services.utils={hasLoadedNamespace:a.hasLoadedNamespace.bind(a)},a.translator=new au(a.services,a.options),a.translator.on("*",function(f){for(var d=arguments.length,m=new Array(d>1?d-1:0),g=1;g0&&arguments[0]!==void 0?arguments[0]:{},t=arguments.length>1?arguments[1]:void 0;return new ui(e,t)});var fe=ui.createInstance();fe.createInstance=ui.createInstance;fe.createInstance;fe.dir;fe.init;fe.loadResources;fe.reloadResources;fe.use;fe.changeLanguage;fe.getFixedT;fe.t;fe.exists;fe.setDefaultNamespace;fe.hasLoadedNamespace;fe.loadNamespaces;fe.loadLanguages;function km(e,t){if(e==null)return{};var n={},r=Object.keys(e),o,i;for(i=0;i=0)&&(n[o]=e[o]);return n}function Ql(e,t){if(e==null)return{};var n=km(e,t),r,o;if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(o=0;o=0)&&Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var 
wm={area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0},Sm=/\s([^'"/\s><]+?)[\s/>]|([^\s=]+)=\s?(".*?"|'.*?')/g;function yu(e){var t={type:"tag",name:"",voidElement:!1,attrs:{},children:[]},n=e.match(/<\/?([^\s]+?)[/\s>]/);if(n&&(t.name=n[1],(wm[n[1]]||e.charAt(e.length-2)==="/")&&(t.voidElement=!0),t.name.startsWith("!--"))){var r=e.indexOf("-->");return{type:"comment",comment:r!==-1?e.slice(4,r):""}}for(var o=new RegExp(Sm),i=null;(i=o.exec(e))!==null;)if(i[0].trim())if(i[1]){var s=i[1].trim(),l=[s,""];s.indexOf("=")>-1&&(l=s.split("=")),t.attrs[l[0]]=l[1],o.lastIndex--}else i[2]&&(t.attrs[i[2]]=i[3].trim().substring(1,i[3].length-1));return t}var Om=/<[a-zA-Z0-9\-\!\/](?:"[^"]*"|'[^']*'|[^'">])*>/g,Em=/^\s*$/,Nm=Object.create(null);function Gc(e,t){switch(t.type){case"text":return e+t.content;case"tag":return e+="<"+t.name+(t.attrs?function(n){var r=[];for(var o in n)r.push(o+'="'+n[o]+'"');return r.length?" "+r.join(" "):""}(t.attrs):"")+(t.voidElement?"/>":">"),t.voidElement?e:e+t.children.reduce(Gc,"")+"";case"comment":return e+""}}var Pm={parse:function(e,t){t||(t={}),t.components||(t.components=Nm);var n,r=[],o=[],i=-1,s=!1;if(e.indexOf("<")!==0){var l=e.indexOf("<");r.push({type:"text",content:l===-1?e:e.substring(0,l)})}return e.replace(Om,function(a,u){if(s){if(a!=="")return;s=!1}var f,d=a.charAt(1)!=="/",m=a.startsWith("");return{type:"comment",comment:r!==-1?e.slice(4,r):""}}for(var o=new RegExp(wm),i=null;(i=o.exec(e))!==null;)if(i[0].trim())if(i[1]){var s=i[1].trim(),l=[s,""];s.indexOf("=")>-1&&(l=s.split("=")),t.attrs[l[0]]=l[1],o.lastIndex--}else i[2]&&(t.attrs[i[2]]=i[3].trim().substring(1,i[3].length-1));return t}var Sm=/<[a-zA-Z0-9\-\!\/](?:"[^"]*"|'[^']*'|[^'">])*>/g,Om=/^\s*$/,Em=Object.create(null);function qc(e,t){switch(t.type){case"text":return e+t.content;case"tag":return e+="<"+t.name+(t.attrs?function(n){var r=[];for(var o in n)r.push(o+'="'+n[o]+'"');return r.length?" "+r.join(" "):""}(t.attrs):"")+(t.voidElement?"/>":">"),t.voidElement?e:e+t.children.reduce(qc,"")+"";case"comment":return e+""}}var Nm={parse:function(e,t){t||(t={}),t.components||(t.components=Em);var n,r=[],o=[],i=-1,s=!1;if(e.indexOf("<")!==0){var l=e.indexOf("<");r.push({type:"text",content:l===-1?e:e.substring(0,l)})}return e.replace(Sm,function(a,u){if(s){if(a!=="")return;s=!1}var p,d=a.charAt(1)!=="/",m=a.startsWith("