diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp index 403e2f2f808e96..06dd6a56e50f93 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp @@ -139,15 +139,11 @@ void ReduceCPULayerTest::SetUp() { function = makeNgraphFunction(netPrecision, params, reduce, "Reduce"); - if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) && - (configuration.at(ov::hint::inference_precision.name()) == ov::element::f16 || - configuration.at(ov::hint::inference_precision.name()) == ov::element::bf16)) { - abs_threshold = 5e-3; - // if (ov::with_cpu_x86_avx512_core_amx()) { - // abs_threshold = 5e-3; - // } else { - // abs_threshold = 5e-2; - // } + if (ov::with_cpu_x86_avx512_core_amx()) { + if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) && + configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) { + abs_threshold = 5e-3; + } } } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp index c3d1f77d1f90e1..47d7d3072b7337 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp @@ -185,6 +185,12 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface kernel, stride, dilation; std::vector padBegin, padEnd; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/loop.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/loop.cpp index c03984ccc9f676..ede3b3e2339086 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/loop.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/loop.cpp @@ -175,7 +175,6 @@ class LoopWhileLayerCPUTest : public LoopLayerCPUTest { bool exec_cond; std::vector shapes; std::vector types; - // ov::element::Type in_type; std::tie(trip_count_type, trip_count, exec_cond, shapes, types, inType) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_CPU; @@ -249,7 +248,6 @@ class LoopForDiffShapesLayerCPUTest : public LoopLayerCPUTest { bool exec_cond; std::vector shapes; std::vector types; - ov::element::Type inType; std::tie(trip_count_type, trip_count, exec_cond, shapes, types, inType) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_CPU; @@ -328,7 +326,6 @@ class LoopForConcatLayerCPUTest : public LoopLayerCPUTest { bool exec_cond; std::vector shapes; std::vector types; - // ov::element::Type in_type; std::tie(trip_count_type, trip_count, exec_cond, shapes, types, inType) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_CPU; diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/index_add_scatter_elements_update.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/index_add_scatter_elements_update.cpp index 9934ceed3bd6b4..4aa422fa263b18 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/index_add_scatter_elements_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/index_add_scatter_elements_update.cpp @@ -98,7 +98,7 @@ class 
IndexAddTest : public testing::WithParamInterface, if (ov::element::bf16 == data_type || ov::element::f16 == data_type) { configuration.insert({ov::hint::inference_precision.name(), data_type}); - outType = data_type; + inType = outType = data_type; abs_threshold = 0.01f; rel_threshold = 0.01f; } @@ -209,14 +209,12 @@ class IndexAddTest : public testing::WithParamInterface, // All index values are expected to be within bounds [-d, d - 1] along dimension d pointed by axis. auto d = dataShape[normalized_axis]; in_data.start_from = -1.0 * static_cast(d); - // in_data.range = static_cast(d-1 - in_data.start_from); - in_data.range = d - 1; + in_data.range = d-1; in_data.resolution = 1; tensor = shape_size(indicesShape) == 0 ? ov::Tensor(funcInput.get_element_type(), indicesShape) : ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), indicesShape, in_data); } else if (i == 2) { // "updates" in_data.start_from = -50; - // in_data.range = 100; in_data.range = 50; in_data.resolution = 1; tensor = shape_size(updateShape) == 0 ? ov::Tensor(funcInput.get_element_type(), updateShape) : diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 162331cf44f02d..85e8c2e10615b7 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -313,28 +313,6 @@ std::vector disabledTestPatterns() { R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/.*trip_count=5_exec_cond=1_netType=i8.*)", R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[\?.1.\?\]_TS=\(10.1.10\)_\(1.1.1\)_\(1.1.1\)_\(5.1.3\)_Input1_IS=\[\?.\?.\?\]_TS=.*_Input2_IS=\[\?.1.\?\]_.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)", R"(.*smoke_LoopForCommon/LoopLayerCPUTest.CompareWithRefs/Input0_IS=\[1..10.1.1..10\]_.*_Input1_IS=\[1..8.1.1..8\]_.*_Input2_IS=\[1..10.\?.1..10\]_TS=.*_types=0_0_1_trip_count_type=.*_trip_count=(1|5)_exec_cond=1_netType=i8.*)", - // R"(.*smoke_Reduce_MultiAxis_4D_fusing_CPU/ReduceCPULayerTest.CompareWithRefs/.*=VECTOR_type=(Max|ReduceL2|Mean)_.*_INFERENCE_PRECISION_HINT=bf16_.*_Fused=Multiply\(PerChannel\).Add\(PerChannel\).*)", - // R"(.*smoke_Reduce_MultiAxis_4D_fusing_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\((0.2|0.3)\)_opType=VECTOR_type=Mean_.*_INFERENCE_PRECISION_HINT=bf16_.*_Fused=Multiply\(PerChannel\).Add\(PerChannel\).*)", - // R"(.*smoke_Reduce_OneAxis_fusing_CPU/ReduceCPULayerTest.CompareWithRefs/.*_INFERENCE_PRECISION_HINT=bf16_Fused=Multiply\(PerChannel\).Add\(PerChannel\).*)", - // R"(.*smoke_Reduce_OneAxis_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\((1|3)\)_opType=.*_type=(ReduceL1|Sum)_.*_INFERENCE_PRECISION_HINT=bf16.*)", - // R"(.*smoke_Reduce_MultiAxis_4D_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\((0.1|0.3|1.2|1.3|2.3|0.1.2|0.1.3|0.2.3|1.2.3)\)_opType=.*_type=(ReduceL1|Sum)_.*_INFERENCE_PRECISION_HINT=bf16.*)", - // R"(.*smoke_Reduce_MultiAxis_5D_fusing_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\((2.4|0.2.4|0.1.2.3.4)\)_opType=.*_type=(ReduceL1|Mean|ReduceL2|Max)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16_.*_Fused=Multiply\(PerChannel\).Add\(PerChannel\).*)", - // 
R"(.*smoke_Reduce_MultiAxis_5D_fusing_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\(1.2.4\)_opType=.*_type=(ReduceL2|Max)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16_.*_Fused=Multiply\(PerChannel\).Add\(PerChannel\).*)", - // R"(.*smoke_Reduce_SingleBatch_CPU/ReduceCPULayerTest.CompareWithRefs/.*_axes=\((1|3)\)_opType=.*_type=(ReduceL1|Sum)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16_.*)", - // R"(.*smoke_Reduce_MultiAxis_5D_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\((2.4|0.2.4)\)_opType=.*_type=(ReduceL1|Sum)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16.*)", - // R"(.*smoke_Reduce_MultiAxis_4D_dynamic_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\(0.1\)_opType=.*_type=(ReduceL1|Sum)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16.*)", - // R"(.*smoke_Reduce_NHWC_SmallChannel_CPU/ReduceCPULayerTest.CompareWithRefs/IS=.*_axes=\(2.3\)_opType=.*_type=(ReduceL1|Sum)_KeepDims=true_netPRC=f32_.*_INFERENCE_PRECISION_HINT=bf16.*)", - // R"(.*smoke_GatherTree/GatherTreeLayerTest.Inference/IS=\(20.(1|20).10\)_secondary_input_type=CONSTANT_netPRC=f32.*)", - // R"(.*smoke_FakeQuantizeLayerCPUTest_5D_jit/FakeQuantizeLayerCPUTest.CompareWithRefs/IS=\[\?.\?.\?.\?.\?\]_TS=\(\(4.16.6.7.8\)\)_\(\(1.16.1.1.1\)\)_.*_inPrec=f32_LOW_BOUNDS=-10_HIGH_BOUNDS=10_IL=\(-10\)_IH=\(-5\)_OL=\(5\)_OH=\(25\)_LEVELS=256_.*)", - // R"(.*smoke_basic/PermConvPermConcat.CompareWithRefs/IS=\(1.1.8.16\)_KS=\(1.3\)_OC=(32|64)_ET=f32_targetDevice=CPU.*)", - // R"(.*smoke_GatherTreeCPUStatic/GatherTreeLayerCPUTest.CompareWithRefs/IS=.*_TS=\(20.(1|20).10\)_secondaryInputType=CONSTANT_netPRC=f32_inPRC=undefined_outPRC=undefined_trgDev=CPU.*)", - // R"(.*smoke_GatherTreeCPUDynamicConstant/GatherTreeLayerCPUTest.CompareWithRefs/IS=.*_TS=\((7.1.10|2.1.7|20.1.10|20.20.15)\)_secondaryInputType=CONSTANT_netPRC=f32_inPRC=undefined_outPRC=undefined_trgDev=CPU.*)", - // R"(.*smoke_GroupConv_3D_Gemm_FP32/GroupConvolutionLayerCPUTest.CompareWithRefs/IS=\[\].*S\(1.1.1\).*O=6_G=3_AP=explicit_netPRC=f32_.*inFmts=(ndhwc|ncdhw)_outFmts=(ndhwc|ncdhw)_.*_Fused=FakeQuantize\(PerTensor\).Relu.*)", - // R"(.*smoke_GroupConv_3D_Gemm_FP32/GroupConvolutionLayerCPUTest.CompareWithRefs/IS=\[1..200.12.\?.1..200.\?\]_.*_S\(1.1.1\)_.*_O=6_G=3_AP=explicit_netPRC=f32_inPRC=undefined_outPRC=undefined_trgDev=CPU_inFmts=(ndhwc|ncdhw)_outFmts=(ndhwc|ncdhw)_primitive=jit_gemm_Fused=FakeQuantize\(PerTensor\).Relu.*)", - // R"(.*smoke_JIT_SSE42_DW_GroupConv/GroupConvolutionLayerCPUTest.CompareWithRefs/IS=.*_TS=\(\(2.8.129.129\)_\)_K\(3.3\)_S\(2.2\)_PB\(1.1\)_PE\(1.1\)_D=\(1.1\)_O=8_G=8_AP=explicit_netPRC=f32_.*_inFmts=(nChw8c|nhwc)_outFmts=(nChw8c|nhwc)_primitive=jit_sse42_dw_Fused=Add\(Parameters\).Elu.FakeQuantize\(PerTensor\).*)", - // R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\?.\?\]_\[1.129.1\]_TS=\(\(1.129\)_\(2.129\)_\(1.129\)_\(2.129\)\)_\(\(1.129.1\)_\(1.129.1\)_\(1.129.1\)_\(1.129.1\)\)_transpose_a=1_transpose_b=1_secondaryInputType=CONSTANT_netPRC=(bf16|f32).*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\).*)", - // R"(.*smoke_MatMulCompressedWeights_non_default_dyn_quant_group_sizes/MatmulWeightsDecompression.CompareWithRefs/data_shape=\[\]_\(\[1,1,1728\]\)_weights_shape=\[1728,128\]_group_size=64_weights_precision=(u8|u4)_decompression_precision=f32_transpose_weights= - // 1_decompression_subtract=full_reshape_on_decompression=0_config=\(DYNAMIC_QUANTIZATION_GROUP_SIZE, 128:\).*)", }; #if defined(OPENVINO_ARCH_X86) @@ -388,26 
+366,6 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(smoke_VariableState/OVInferRequestVariableStateTest.*)"); // Issue: 141705 retVector.emplace_back(R"(.*smoke_arm_Deconv_2D_Planar_FP16/DeconvolutionLayerCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"); - // fill_data_random fix - // retVector.emplace_back(R"(.*smoke_arm_Deconv_2D_Planar_FP16/DeconvolutionLayerCPUTest.*INFERENCE_PRECISION_HINT=f16.*)"); - // retVector.emplace_back(R"(.*smoke_CompareWithRefs_dynamic/EltwiseLayerTest.Inference/IS=.*1..10.200.*1..10.200.*_TS=.*2.200.*1.200.*2.200.*5.200.*_eltwise_op_type=Sum_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_.*)"); - // retVector.emplace_back(R"(.*smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS=.*_TS=\(\(16.16.16.16\)_\(16.16.(16.1|1.16)\)_\)_eltwise_op_type=Sum_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_.*)"); - // retVector.emplace_back(R"(.*smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS=.*_TS=\(\(16.16.16.(16|1)\)_\(16.16.16.1\)_\)_eltwise_op_type=SqDiff_secondary_input_type=.*_opType=VECTOR_model_type=i32.*)"); - // retVector.emplace_back(R"(.*smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS=.*_TS=\(\(16.16.(16|1).16\)_\(16.16.1.16\)_\)_eltwise_op_type=SqDiff_secondary_input_type=.*_opType=VECTOR_model_type=i32.*)"); - // retVector.emplace_back(R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=\(\(16.16.16.(16|1).(16|1)\)_\)_eltwise_op_type=Sum_secondary_input_type=PARAMETER_opType=SCALAR_model_type=i32.*)"); - // retVector.emplace_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hidden_size=10_input_size=1_IS=\(5.1\)\(5.10\)\(5.10\)\(40.1\)\(40.10\)\(40\)_activations=\(relu.(sigmoid.tanh|relu.relu)\)_clip=0_WType=.*_RType=CONSTANT_BType=PARAMETER_modelType=f16.*)"); - // retVector.emplace_back(R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference/decomposition0_batch=5_hidden_size=10_input_size=30_IS=\(5.30\)\(5.10\)\(5.10\)\(40.30\)\(40.10\)\(40\)_activations=\(relu.(sigmoid.tanh|relu.relu)\)_clip=0_WType=.*_RType=CONSTANT_BType=PARAMETER_modelType=f16.*)"); - // retVector.emplace_back(R"(.*smoke_GRUCellCommon/GRUCellTest.Inference/decomposition1_batch=5_hidden_size=1_input_size=1_IS=\(5.1\)\(5.1\)\(3.1\)\(3.1\)\(4\)_activations=\(tanh.relu\)_clip=0_linear_before_reset=1_WType=.*_RType=.*_BType=CONSTANT_netPRC=f32_.*)"); - // retVector.emplace_back(R"(.*smoke_GRUCellCommon/GRUCellTest.Inference/decomposition1_batch=5_hidden_size=1_input_size=30_IS=\(5.30\)\(5.1\)\(3.30\)\(3.1\)\(4\)_activations=\(tanh.relu\)_clip=0_linear_before_reset=1_WType=.*_RType=.*_BType=CONSTANT_netPRC=f32_.*)"); - // retVector.emplace_back(R"(.*smoke_GRUCellCommon/GRUCellTest.Inference/decomposition1_batch=5_hidden_size=10_input_size=1_IS=\(5.1\)\(5.10\)\(30.1\)\(30.10\)\((40|30)\)_activations=\(tanh.relu\)_clip=0_linear_before_reset=(0|1)_WType=.*_RType=.*_BType=CONSTANT_netPRC=f32_.*)"); - // retVector.emplace_back(R"(.*smoke_GRUCellCommon/GRUCellTest.Inference/decomposition1_batch=5_hidden_size=10_input_size=30_IS=\(5.30\)\(5.10\)\(30.30\)\(30.10\)\(30\)_activations=\(tanh.relu\)_clip=0_linear_before_reset=0_WType=.*_RType=.*_BType=CONSTANT_netPRC=f32.*)"); - // retVector.emplace_back(R"(.*moke_Activation5D_dynamicMath_CPU/ActivationLayerCPUTest.CompareWithRefs/Log_IS=\(\[?.?\]\)_TS=\(1.50\)_\(5.128\)_\(3.64\)_AS=\(\)_ConstantsValue=\(\)_netPRC=f32_inPRC=f32_outPRC=f32_.*)"); - // 
retVector.emplace_back(R"(.*moke_Activation5D_dynamicMath_CPU/ActivationLayerCPUTest.CompareWithRefs/Log_IS=\(\[1..5.128\]\)_TS=\(1.128\)_\(3.128\)_\(5.128\)_AS=\(\)_ConstantsValue=\(\)_netPRC=f32_inPRC=f32_outPRC=f32_.*)"); - // retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/EltwiseChainTest.CompareWithRefs/IS=.*_TS=\(\(1.1.2.3\)_\(1.1.2.3\)_\(1.1.2.3\)_InPRC0=f16_InPRC1=f32_InPRC2=f32_Op0=Div_secondaryInputType=CONSTANT_WithQuant=0_Conversion=(i8|u8).*)"); - // retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/EltwiseChainTest.CompareWithRefs/IS=.*_TS=\(\(1.1.2.3\)_\(1.1.2.3\)_\(1.1.2.3\)_InPRC0=f32_InPRC1=f32_InPRC2=f32_Op0=Prod_secondaryInputType=CONSTANT_WithQuant=0_Conversion=(i8|u8).*)"); - // // to long - // retVector.emplace_back(R"(.*smoke_TensorIteratorCommonClip/TensorIteratorTest.Inference/.*_TensorIteratorBody=LSTM_.*_modelType=(f16|f32).*)"); - // retVector.emplace_back(R"(.*smoke_EltwiseChain_MergeConvert_int8/EltwiseChainTest.CompareWithRefs/IS=.*_TS=\(\(1.1.2.3\)_\(1.1.2.3\)_\(1.1.2.3\)_InPRC0=f16_InPRC1=f32_InPRC2=f32_Op0=(Prod|Sum)_secondaryInputType=CONSTANT_WithQuant=0_Conversion=(i8|u8)_.*)"); #endif #if defined(OPENVINO_ARCH_ARM) @@ -563,14 +521,6 @@ std::vector disabledTestPatterns() { // Issue: 141705 retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)"); retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)"); - // range - // retVector.emplace_back(R"(.*smoke_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\?.\?\]_\[1.129.1\]_.*_netPRC=(f32|bf16)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)"); - // retVector.emplace_back(R"(.*smoke_MatMulCompressedWeights_non_default_dyn_quant_group_sizes/MatmulWeightsDecompression.CompareWithRefs/.*_\(\[1,1,1728\]\)_.*_precision=(u8|u4)_decompression_precision=f32_.*_subtract=full_reshape_on_decompression=0_config=\(DYNAMIC_QUANTIZATION_GROUP_SIZE.*128.*Fused=fusingBias.*)"); - // retVector.emplace_back(R"(.*smoke_LoopForDiffShapesConcat/LoopForDiffShapesLayerCPUTest.CompareWithRefs/Input0_IS=.*_TS=\(10.1.10\)_\(1.10.1\)_\(1.10.1\)_\(2.2.2\)_types=trip_count_type=PARAMETER_trip_count=(1|5)_exec_cond=1_netType=bf16.*)"); - // retVector.emplace_back(R"(.*smoke_LoopForDiffShapesConcat/LoopForDiffShapesLayerCPUTest.CompareWithRefs/Input0_IS=.*_TS=\(10.5.10\)_\(1.10.1\)_\(1.10.1\)_\(2.1.2\)_types=trip_count_type=PARAMETER_trip_count=(1|5)_exec_cond=1_netType=bf16.*)"); - // retVector.emplace_back(R"(.*smoke_LoopForConcat/LoopForConcatLayerCPUTest.CompareWithRefs/Input0_IS=.*_TS=\(10.5.10\)_\(1.10.1\)_\(1.10.1\)_\(2.1.2\)_types=trip_count_type=PARAMETER_trip_count=(1|5)_exec_cond=1_netType=bf16.*)"); - // retVector.emplace_back(R"(.*smoke_LoopForConcat/LoopForConcatLayerCPUTest.CompareWithRefs/Input0_IS=.*_TS=\(10.10.10\)_\(5.10.10\)_\(5.10.10\)_\(8.10.10\)_Input1_IS=\[\?.10.10\]_.*_types=trip_count_type=PARAMETER_trip_count=1_exec_cond=1_netType=bf16.*)"); - // retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)"); } if (ov::with_cpu_x86_avx512_core_fp16()) { diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp index db2b1636e5988d..2a29775c6ebd87 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp @@ -262,7 +262,7 @@ 
TEST(VariablesTest, smoke_set_get_state_with_convert) { auto variables = request.query_state(); ASSERT_EQ(variables.size(), 1); auto variable = variables.front(); -ASSERT_EQ(variable.get_name(), "v0"); + ASSERT_EQ(variable.get_name(), "v0"); auto state_tensor = variable.get_state(); ASSERT_EQ(state_tensor.get_shape(), virable_shape); ASSERT_EQ(state_tensor.get_element_type(), et); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 105c6a951c1da5..2b2c2c92f71d29 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -201,32 +201,8 @@ std::vector disabledTestPatterns() { R"(.*smoke_RDFT_5d_last_axis/RDFTLayerTest.Inference/IS=\(10.4.8.2.5\)_modelType=f32_Axes=\(0.1.2.3.4\)_SignalSize=\(\).*)", // Issue: 136862 R"(.*smoke_ConditionGPUTest_static/StaticConditionLayerGPUTest.CompareWithRefs/IS=\(3.6\)_netPRC=i8_ifCond=PARAM_targetDevice=GPU_.*)", - // // ranges - // R"(.*smoke_MaxPool8_ExplicitPad_(Floor|Ceil)Rounding/MaxPoolingV8LayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.30.30\)\}_K\(3.3\)_S\(1.1\)_D\(1.1\)_PB\(0.0\)_PE\(0.0\)_IETi32_(A0|A2)_Rounding=(floor|cell)_AutoPad=explicit_modelType=f32_.*)", - // R"(.*smoke_ConvolutionBackpropData3D_ExplicitPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.10.10.10\)\}_OS=\(\)_K\(3.3.3\)_S\(3.3.3\)_PB\(0.0.0\)_PE\(0.0.0\)_D=\(1.1.1\)_.*_O=5_AP=valid_netPRC=f16.*)", - // R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.10.10.10\)\}_OS=\(\)_K\(3.3.3\)_S\(3.3.3\)_PB\(0.0.0\)_.*_D=\(1.1.1\)_.*_O=5_AP=explicit_netPRC=f16_.*)", - // R"(.*smoke_Multinomial/MultinomialLayerTestGPU.Inference/static_probs_shape=\[(1,32|2,28)\]_num_samples=(2|4)_inType=f16_convert_type=i64_.*_seed_g=0_seed_o=2_.*)", - // R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.10.10.10\)\}_OS=\(\)_K\(3.3.3\)_S\(3.3.3\)_PB\(0.0.0\)_.*_D=\(1.1.1\)_.*_O=5_AP=explicit_netPRC=f16_.*)", - // R"(.*smoke_gathertree_constant_compareWithRefs_dynamic/GatherTreeLayerGPUTest.Inference/IS=.*_secondaryInputType=CONSTANT_netPRC=f32_.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(5.1.10\)_secondary_input_type=PARAMETER_netPRC=f16_trgDev=GPU.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.1.10\)_secondary_input_type=.*_netPRC=f16_trgDev=GPU.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.1.10\)_secondary_input_type=CONSTANT_netPRC=f32_trgDev=GPU.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.20.10\)_secondary_input_type=.*_netPRC=(f32|f16)_trgDev=GPU.*)", - // R"(.*smoke_AvgPool_ExplicitPad_CeilRounding/PoolingLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.30.30\)\}_AvgPool_.*_Rounding=ceil_AutoPad=explicit_modelType=f16.*)", - // R"(.*smoke_AvgPool_ExplicitPad_FloorRounding/PoolingLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.30.30\)\}_AvgPool_ExcludePad=.*_Rounding=floor_AutoPad=explicit_modelType=f16.*)", - // R"(.*smoke_MAX_and_AVGPool_ValidPad/PoolingLayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.50.50\)\}_AvgPool_ExcludePad=0_K\(3.3\).*_PB\(0.0\)_.*_Rounding=floor_AutoPad=valid_modelType=f16.*)", - // 
R"(.*smoke_MaxPool8_ExplicitPad_FloorRounding/MaxPoolingV8LayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.30.30\)\}_K\(3.5\)_S\(1.1\)_D\(1.1\)_PB\(0.0\)_PE\(0.0\)_IETi32_A(0|2)_Rounding=floor_AutoPad=explicit_modelType=f32.*)", - // R"(.*smoke_MaxPool8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inference/IS=\(\[\]\)_TS=\{\(1.3.30.30\)\}_K\(3.*\)_S\(1.1\)_D\(1.1\)_PB\(0.0\)_PE\(0.0\)_IETi32_A(0|2)_Rounding=ceil_AutoPad=explicit_modelType=f32.*)", - // R"(.*smoke_TestsROIAlign_average/ROIAlignLayerTest.Inference/IS=\(\[\]\)_TS=\{\(3.8.16.16\)\}_coordShape=\(2.4\)_pooledH=2_pooledW=2_spatialScale=1_poolingRatio=2_poolingMode=avg_modelType=f32.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(5.1.10\)_secondary_input_type=PARAMETER_netPRC=f16.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.(20|1).10\)_secondary_input_type=CONSTANT_netPRC=(f16|f32).*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.1.10\)_secondary_input_type=PARAMETER_netPRC=f16.*)", - // R"(.*smoke_TestsROIAlign_avg_asym/ROIAlignV9LayerTest.Inference/IS=\(\[\]\)_TS=\{\(2.8.20.20\)\}_coordShape=\(2.4\)_pooledH=2_pooledW=2_spatialScale=1_poolingRatio=2_poolingMode=avg_ROIMode=asymmetric_modelType=f32.*)", - // R"(.*smoke_Grn_Basic/GrnLayerTest.Inference/IS=\(\[\]\)_TS=\{\((2.16.15.20|1.3.30.30)\)\}_modelType=f16_bias=(1.1|0.33).*)", #if defined(_WIN32) // by calc abs_threshold with expected value - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(20.20.10\)_secondary_input_type=PARAMETER_netPRC=f16.*)", - // R"(.*Basic_smoke/GatherTreeLayerTest.Inference/IS=\(5.1.10\)_secondary_input_type=CONSTANT_netPRC=f16.*)", R"(.*smoke_RemoteTensor/OVRemoteTensorBatched_Test.NV12toBGR_buffer/(num_batch_4|num_batch_2).*)", R"(.*smoke_Check/ConstantResultSubgraphTest.Inference/SubgraphType=SINGLE_COMPONENT_IS=\[1,3,10,10\]_IT=i16_Device=GPU.*)", #endif diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/gflag_config.hpp b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/gflag_config.hpp index aeef76416c6214..b891061ad57212 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/gflag_config.hpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/gflag_config.hpp @@ -46,7 +46,6 @@ static const char ignore_crash_message[] = "Optional. Allow to not terminate the "This is organized with custom crash handler. Please, note, that handler work for test body," "if crash happened on SetUp/TearDown stage, the process will be terminated."; static const char reference_cache_dir_message[] = "Optional. 
Set the directory with reference cache"; -static const char target_ops_message[] = "Optional."; DEFINE_bool(h, false, help_message); @@ -63,7 +62,6 @@ DEFINE_string(shape_mode, "", shape_mode_message); DEFINE_uint32(test_timeout, UINT_MAX, test_timeout_message); DEFINE_uint32(ignore_crash, false, ignore_crash_message); DEFINE_string(ref_dir, "", reference_cache_dir_message); -DEFINE_string(target_ops, "", target_ops_message); /** * @brief This function shows a help message @@ -85,7 +83,6 @@ static void showUsage() { std::cout << " --plugin_lib_name " << output_folder_message << std::endl; std::cout << " --shape_mode \"\" " << shape_mode_message << std::endl; std::cout << " --test_timeout \"\" " << test_timeout_message << std::endl; - std::cout << " --target_ops \"\" " << target_ops_message << std::endl; std::cout << " --ignore_crash " << ignore_crash_message << std::endl; std::cout << " --ref_dir \"\" " << reference_cache_dir_message << std::endl; } diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp index eec9d4be5139cd..693c2cca018d00 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp @@ -112,10 +112,6 @@ int main(int argc, char* argv[]) { ov::test::utils::global_plugin_config = ov::test::conformance::read_plugin_config(FLAGS_config_path); } - if (!FLAGS_target_ops.empty()) { - ov::test::utils::target_ops = FLAGS_target_ops.c_str(); - } - ::testing::InitGoogleTest(&argc, argv); ::testing::AddGlobalTestEnvironment(new ov::test::utils::TestEnvironment); diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index 4753af4c19e9f2..458689d4333a6b 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -260,24 +260,6 @@ void ReadIRTest::SetUp() { if (inputShapes.empty()) { GTEST_SKIP() << "The graph is constant. 
The case is not applicable for Operation conformance scenario"; } - if (!ov::test::utils::target_ops.empty()) { - std::regex op_regexp = std::regex(ov::test::utils::target_ops); - bool op_exists = false; - for (const auto& node : function->get_ops()) { - if (ov::op::util::is_constant(node) || - ov::op::util::is_parameter(node)) { - continue; - } - auto a = node->get_type_name(); - if (std::regex_match(node->get_type_name(), op_regexp)) { - op_exists = true; - break; - } - } - if (!op_exists) { - GTEST_SKIP() << "The graph is not contains requeried ops: " << ov::test::utils::target_ops << std::endl; - } - } std::cout << "[ CONFORMANCE ] Influence coefficient: " << rel_influence_coef << std::endl; init_input_shapes(inputShapes); is_report_stages = true; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp index 4a17fdf3d2ff7d..d2930be59d5eac 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp @@ -7,7 +7,7 @@ #include "openvino/core/preprocess/color_format.hpp" #include "functional_test_utils/common_utils.hpp" -#include "ranges.hpp" +#include "shared_test_classes/base/utils/ranges.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index db1b9d0079d2df..865687e3073024 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -12,143 +12,16 @@ #include "common_test_utils/ov_tensor_utils.hpp" #include "common_test_utils/type_ranges.hpp" #include "openvino/core/node.hpp" -#include "openvino/op/abs.hpp" -#include "openvino/op/acos.hpp" -#include "openvino/op/acosh.hpp" -#include "openvino/op/asin.hpp" -#include "openvino/op/asinh.hpp" -#include "openvino/op/atan.hpp" -#include "openvino/op/atanh.hpp" -#include "openvino/op/batch_norm.hpp" -#include "openvino/op/broadcast.hpp" -#include "openvino/op/ceiling.hpp" -#include "openvino/op/clamp.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/convolution.hpp" -#include "openvino/op/cos.hpp" -#include "openvino/op/cosh.hpp" -#include "openvino/op/deformable_convolution.hpp" -#include "openvino/op/depth_to_space.hpp" -#include "openvino/op/dft.hpp" -#include "openvino/op/divide.hpp" -#include "openvino/op/einsum.hpp" -#include "openvino/op/elu.hpp" -#include "openvino/op/erf.hpp" -#include "openvino/op/exp.hpp" -#include "openvino/op/experimental_detectron_generate_proposals.hpp" -#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" -#include "openvino/op/eye.hpp" -#include "openvino/op/fake_quantize.hpp" -#include "openvino/op/floor.hpp" -#include "openvino/op/floor_mod.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/gather_nd.hpp" -#include "openvino/op/gelu.hpp" -#include "openvino/op/group_conv.hpp" -#include "openvino/op/gru_sequence.hpp" -#include "openvino/op/hard_sigmoid.hpp" -#include "openvino/op/hsigmoid.hpp" -#include "openvino/op/hswish.hpp" -#include "openvino/op/idft.hpp" -#include "openvino/op/interpolate.hpp" 
-#include "openvino/op/irdft.hpp" -#include "openvino/op/log.hpp" -#include "openvino/op/logical_and.hpp" -#include "openvino/op/logical_not.hpp" -#include "openvino/op/logical_or.hpp" -#include "openvino/op/logical_xor.hpp" -#include "openvino/op/lrn.hpp" -#include "openvino/op/lstm_sequence.hpp" -#include "openvino/op/matmul.hpp" -#include "openvino/op/matrix_nms.hpp" -#include "openvino/op/max_pool.hpp" -#include "openvino/op/maximum.hpp" -#include "openvino/op/minimum.hpp" -#include "openvino/op/mish.hpp" -#include "openvino/op/mod.hpp" -#include "openvino/op/multiply.hpp" -#include "openvino/op/negative.hpp" -#include "openvino/op/non_max_suppression.hpp" -#include "openvino/op/pad.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/power.hpp" -#include "openvino/op/prelu.hpp" -#include "openvino/op/proposal.hpp" -#include "openvino/op/random_uniform.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/rdft.hpp" -#include "openvino/op/reduce_l1.hpp" -#include "openvino/op/reduce_l2.hpp" -#include "openvino/op/reduce_logical_and.hpp" -#include "openvino/op/reduce_logical_or.hpp" -#include "openvino/op/reduce_max.hpp" -#include "openvino/op/reduce_mean.hpp" -#include "openvino/op/reduce_min.hpp" -#include "openvino/op/reduce_prod.hpp" -#include "openvino/op/reduce_sum.hpp" -#include "openvino/op/region_yolo.hpp" -#include "openvino/op/relu.hpp" -#include "openvino/op/reshape.hpp" -#include "openvino/op/rnn_sequence.hpp" -#include "openvino/op/roi_align.hpp" -#include "openvino/op/round.hpp" -#include "openvino/op/scatter_elements_update.hpp" -#include "openvino/op/scatter_update.hpp" -#include "openvino/op/select.hpp" -#include "openvino/op/selu.hpp" -#include "openvino/op/sigmoid.hpp" -#include "openvino/op/sign.hpp" -#include "openvino/op/sin.hpp" -#include "openvino/op/sinh.hpp" -#include "openvino/op/softplus.hpp" -#include "openvino/op/softsign.hpp" -#include "openvino/op/space_to_batch.hpp" -#include "openvino/op/sqrt.hpp" -#include "openvino/op/strided_slice.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/swish.hpp" -#include "openvino/op/tan.hpp" -#include "openvino/op/tanh.hpp" -#include "openvino/op/topk.hpp" -#include "openvino/op/unsqueeze.hpp" -#include "openvino/op/variadic_split.hpp" -#include "openvino/op/cum_sum.hpp" -#include "openvino/op/mvn.hpp" -#include "openvino/op/gru_cell.hpp" -#include "openvino/op/gru_sequence.hpp" -#include "openvino/op/if.hpp" -#include "openvino/op/tensor_iterator.hpp" -#include "openvino/op/group_normalization.hpp" -#include "openvino/op/reverse_sequence.hpp" -#include "openvino/op/gather_tree.hpp" -#include "openvino/op/deformable_psroi_pooling.hpp" -#include "openvino/op/softmax.hpp" -#include "openvino/op/psroi_pooling.hpp" -#include "ov_ops/augru_sequence.hpp" +#include "openvino/op/ops.hpp" #include "ov_ops/augru_cell.hpp" -#include "openvino/op/roll.hpp" -#include "openvino/op/lstm_cell.hpp" -#include "openvino/op/lstm_sequence.hpp" -#include "openvino/op/squared_difference.hpp" -#include "openvino/op/scaled_dot_product_attention.hpp" -#include "openvino/op/transpose.hpp" -#include "openvino/op/loop.hpp" -#include "openvino/op/squared_difference.hpp" -#include "openvino/op/avg_pool.hpp" -#include "openvino/op/ctc_loss.hpp" -#include "openvino/op/grid_sample.hpp" -#include "openvino/op/multinomial.hpp" -#include "openvino/op/embeddingbag_offsets_sum.hpp" -#include "openvino/op/generate_proposals.hpp" -#include "openvino/op/roi_pooling.hpp" -#include "openvino/op/shuffle_channels.hpp" -#include 
"openvino/op/slice.hpp" +#include "ov_ops/augru_sequence.hpp" namespace ov { namespace test { namespace utils { +// NOTE: Default ranges are collected by data type and have resolution 1(for real types too) +// to set up correct ranges and resolutions, please, configure range for Op in inputRanges structure struct Range { std::vector int_port_ranges; std::vector real_port_ranges; @@ -273,22 +146,24 @@ static std::map inputRanges = { {ov::op::v4::Interpolate::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v0::LRN::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v1::Pad::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, - {ov::op::v3::Broadcast::get_type_info_static(), Range({{0, 200}}, {{0, 2000, 32768}})}, - {ov::op::v5::NonMaxSuppression::get_type_info_static(), Range({{0, 15}, {0, 1, 1000, 1, true}}, - {{0, 8, 32}, {0, 1, 1000, 1, true}})}, - {ov::op::v9::NonMaxSuppression::get_type_info_static(), Range({{0, 15}, {0, 1, 1000, 1, true}}, - {{0, 8, 32}, {0, 1, 1000, 1, true}})}, - {ov::op::v8::MatrixNms::get_type_info_static(), Range({{0, 15}, {0, 1, 1000, 1, true}}, - {{0, 8, 32}, {0, 1, 1000, 1, true}})}, - {ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::get_type_info_static(), Range({{1, 0, 1, 1}}, {{1, 0, 1, 1}})}, - {ov::op::v6::ExperimentalDetectronPriorGridGenerator::get_type_info_static(), Range({{0, 0, 1}}, - {{-100, 200, 2, 1}, {0, 0, 1, 1, true}, {0, 0, 1, 1, true}})}, - {ov::op::v8::DeformableConvolution::get_type_info_static(), Range({{0, 15}, {0, 2, 10, 1, true}, {0, 1, 20, 1, true}}, - {{0, 8, 32}, {0, 2, 10, 1, true}, {0, 1, 20, 1, true}})}, + {ov::op::v3::Broadcast::get_type_info_static(), Range({{0, 200}, {0, 10, 1, 1, true}, {0, 10, 1, 1, true}}, {{0, 2000, 32768}})}, + {ov::op::v5::NonMaxSuppression::get_type_info_static(), + Range({{0, 15}, {0, 1, 1000, 1, true}}, {{0, 8, 32}, {0, 1, 1000, 1, true}})}, + {ov::op::v9::NonMaxSuppression::get_type_info_static(), + Range({{0, 15}, {0, 1, 1000, 1, true}}, {{0, 8, 32}, {0, 1, 1000, 1, true}})}, + {ov::op::v8::MatrixNms::get_type_info_static(), + Range({{0, 15}, {0, 1, 1000, 1, true}}, {{0, 8, 32}, {0, 1, 1000, 1, true}})}, + {ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::get_type_info_static(), + Range({{1, 0, 1, 1}}, {{1, 0, 1, 1}})}, + {ov::op::v6::ExperimentalDetectronPriorGridGenerator::get_type_info_static(), + Range({{0, 0, 1}}, {{-100, 200, 2, 1}, {0, 0, 1, 1, true}, {0, 0, 1, 1, true}})}, + {ov::op::v8::DeformableConvolution::get_type_info_static(), + Range({{0, 15}, {0, 2, 10, 1, true}, {0, 1, 20, 1, true}}, + {{0, 8, 32}, {0, 2, 10, 1, true}, {0, 1, 20, 1, true}})}, {ov::op::v5::GRUSequence::get_type_info_static(), Range({{0, 15}, {0, 15}, {0, 10, 1, 1, true}}, {{0, 8, 32}})}, {ov::op::v5::BatchNormInference::get_type_info_static(), Range({{0, 3}}, {{0, 3, 1}})}, - {ov::op::v5::RNNSequence::get_type_info_static(), Range({{0, 15}, {0, 15}, {0, 10, 1, 1, true}}, - {{0, 8, 32}, {0, 8, 32}, {0, 10, 1, 1, true}})}, + {ov::op::v5::RNNSequence::get_type_info_static(), + Range({{0, 15}, {0, 15}, {0, 10, 1, 1, true}}, {{0, 8, 32}, {0, 8, 32}, {0, 10, 1, 1, true}})}, {ov::op::v1::LogicalAnd::get_type_info_static(), Range({{0, 2}}, {{0, 2}})}, {ov::op::v1::LogicalNot::get_type_info_static(), Range({{0, 2}}, {{0, 2}})}, {ov::op::v1::LogicalOr::get_type_info_static(), Range({{0, 2}}, {{0, 2}})}, @@ -298,12 +173,14 @@ static std::map inputRanges = { {ov::op::v1::Reshape::get_type_info_static(), Range({{-1000, 2000}, {0, 256, 1, 1, true}}, {{-100, 200, 
32768}})}, {ov::op::v3::TopK::get_type_info_static(), Range({{-1000, 2000}, {0, 1000, 1, 1, true}}, {{-1000, 2000, 32768}})}, {ov::op::v11::TopK::get_type_info_static(), Range({{-1000, 2000}, {0, 1000, 1, 1, true}}, {{-1000, 2000, 32768}})}, - {ov::op::v4::Range::get_type_info_static(), Range({{0, 15}, {1, 1000, 1, 1, true}}, - {{-1000, 2000, 32768}, {1, 1000, 1, 1, true}})}, - {ov::op::v3::ROIAlign::get_type_info_static(), Range({{0, 15}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}}, - {{-1000, 2000, 32768}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}})}, - {ov::op::v9::ROIAlign::get_type_info_static(), Range({{0, 15}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}}, - {{-1000, 2000, 32768}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}})}, + {ov::op::v4::Range::get_type_info_static(), + Range({{0, 15}, {1, 1000, 1, 1, true}}, {{-1000, 2000, 32768}, {1, 1000, 1, 1, true}})}, + {ov::op::v3::ROIAlign::get_type_info_static(), + Range({{0, 15}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}}, + {{-1000, 2000, 32768}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}})}, + {ov::op::v9::ROIAlign::get_type_info_static(), + Range({{0, 15}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}}, + {{-1000, 2000, 32768}, {0, 1000, 1, 1, true}, {0, 1000, 1, 1, true}})}, {ov::op::v0::Convert::get_type_info_static(), Range({{0, 1000}}, {{-100, 200, 32768}})}, {ov::op::v0::FakeQuantize::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v0::FakeQuantize::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, @@ -340,16 +217,23 @@ static std::map inputRanges = { {ov::op::v4::LSTMCell::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v13::ScaledDotProductAttention::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v1::Transpose::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, - {ov::op::v5::Loop::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v5::Loop::get_type_info_static(), Range({{1, 10, 1, 1, true}, {0, 2, 1, 1, true}, {0, 15}}, {{0, 8, 32}})}, {ov::op::v0::SquaredDifference::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v4::CTCLoss::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v9::GridSample::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v13::Multinomial::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v3::EmbeddingBagOffsetsSum::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v15::EmbeddingBagOffsets::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v9::GenerateProposals::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v0::ROIPooling::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v0::ShuffleChannels::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, - {ov::op::v8::Slice::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v8::Slice::get_type_info_static(), + Range({{0, 15}, {0, 15, 1, 1, true}, {0, 15, 1, 1, true}, {1, 5, 1, 1, true}, {0, 15, 1, 1, true}}, {{0, 8, 32}})}, + {ov::op::v3::EmbeddingBagPackedSum::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v3::EmbeddingSegmentsSum::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v15::EmbeddingBagPacked::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v0::GRN::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, + {ov::op::v1::Add::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, }; class ModelRange { diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/tensor_iterator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/tensor_iterator.hpp index 41749ca218e4a3..f02da2a92511b2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/tensor_iterator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/tensor_iterator.hpp @@ -34,6 +34,7 @@ class TensorIteratorTest : public testing::WithParamInterface<TensorIteratorParams>, +    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override; }; } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp index 47e58a8839034f..3b57bb8c56602b 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp @@ -62,7 +62,6 @@ void GRUSequenceTest::SetUp() { ov::op::RecurrentSequenceDirection direction; InputLayerType wbr_type; ov::test::utils::SequenceTestsMode mode; - // ov::element::Type_t in_type = ov::element::undefined; std::tie(mode, shapes, activations, clip, linear_before_reset, direction, wbr_type, inType, targetDevice) = this->GetParam(); outType = inType; diff --git a/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp index de359a3f8c0752..32f48dea3b7ec1 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/tensor_iterator.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +#include #include "transformations/control_flow/unroll_tensor_iterator.hpp" #include "shared_test_classes/single_op/tensor_iterator.hpp" #include "openvino/pass/manager.hpp" @@ -236,5 +237,21 @@ void TensorIteratorTest::SetUp() { m.run_passes(function); } } + +void TensorIteratorTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) { + inputs.clear(); + + const auto& funcInputs = function->inputs(); + for (size_t i = 0; i < funcInputs.size(); i++) { + const auto& funcInput = funcInputs[i]; + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 8; + in_data.resolution = funcInput.get_element_type().is_real() ? 
32 : 1; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); + inputs.insert({funcInput.get_node_shared_ptr(), tensor}); + } +} + } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp index 13baf0feb56dce..69138c3bedf3c5 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/perm_conv_perm_concat.cpp @@ -44,6 +44,10 @@ void PermConvPermConcat::SetUp() { std::tie(element_type, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = this->GetParam(); + if (element_type == ov::element::f32) { + abs_threshold = 1e-6; + } + configuration.insert(additional_config.begin(), additional_config.end()); const std::size_t input_dim = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies()); diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_plugin_cache.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_plugin_cache.hpp index 9a181e63f3ffc3..5661cb8abd0a04 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_plugin_cache.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_plugin_cache.hpp @@ -18,7 +18,6 @@ namespace utils { // global plugin config is set up for conformance extern ov::AnyMap global_plugin_config; extern std::string target_device; -extern std::string target_ops; extern std::string target_plugin_name; extern std::unordered_set available_devices; diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp index 8bc454f035cf44..c971ee3f3771fc 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp @@ -57,64 +57,6 @@ struct InputGenerateData { } }; - bool correct_range(const std::shared_ptr new_range) { - bool success = true; - - double new_max = new_range->start_from + new_range->range; - double current_max = start_from + range; - - if (start_from == new_range->start_from) { - // nothing to do - -----start_curr/new+++++++++++++++range*res curr/new----------------------- - // nothing to do - -----start_curr/new+++++++++++++++range*res curr----------range*res new---- - // reduce range - -----start_curr/new+++++++++++++++range*res new-----------range*res curr--- - if (current_max > new_max) { - range = new_range->range; - resolution = new_range->resolution > resolution ? 
new_range->resolution : resolution; - } - } else if (start_from > new_range->start_from) { - // nothing to do - -----start_new-----start_curr++++++++++range*res curr/new------------------- - // nothing to do - -----start_new-----start_curr++++++++++range*res curr------range*res new---- - // reduce range - -----start_new-----start_curr++++++++++range*res new-------range*res curr--- - // could not find range - -----start_new---range*res new-----start_curr-----range*res curr--- - if (start_from > new_max) { - success = false; -#ifndef NDEBUG - std::cout << " FAIL TO FIND RANGE: current->start_from > new_range->start_from + new_range->range " - << " current->start_from: " << std::to_string(start_from) - << " new_range->start_from: " << std::to_string(new_range->start_from) - << " new_range max: " << std::to_string(new_max) << std::endl; -#endif - } else if (current_max > new_max) { - range = (uint32_t)round(new_max - start_from); - resolution = new_range->resolution > resolution ? new_range->resolution : resolution; - } - } else if (start_from < new_range->start_from) { - // reset to new - -----start_curr-----start_new++++++++++range*res curr/new------------------- - // reset to new - -----start_curr-----start_new++++++++++range*res new-------range*res curr--- - // recalculate range - -----start_curr-----start_new++++++++++range*res curr------range*res new---- - // could not find range - -----start_curr---range*res curr-----start_new-----range*res new--- - if (current_max < new_range->start_from) { - success = false; -#ifndef NDEBUG - std::cout << " FAIL TO FIND RANGE: current->start_from + current->range < new_range->start_from " - << " new_range start_from: " << std::to_string(new_range->start_from) - << " current->start_from: " << std::to_string(start_from) - << " current max: " << std::to_string(current_max) << std::endl; -#endif - } else if (current_max >= new_max) { - start_from = new_range->start_from; - range = new_range->range; - resolution = new_range->resolution > resolution ? new_range->resolution : resolution; - } else { - range = (uint32_t)round(current_max - new_range->start_from); - resolution = new_range->resolution > resolution ? 
new_range->resolution : resolution; - start_from = new_range->start_from; - } - } - - return success; - }; - bool correct_range(const InputGenerateData new_range) { bool success = true; diff --git a/src/tests/test_utils/common_test_utils/src/ov_plugin_cache.cpp b/src/tests/test_utils/common_test_utils/src/ov_plugin_cache.cpp index cc7f34fd13c7fb..0b7778784f21db 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_plugin_cache.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_plugin_cache.cpp @@ -19,7 +19,6 @@ namespace utils { ov::AnyMap global_plugin_config = {}; std::unordered_set<std::string> available_devices = {}; std::string target_device = ""; -std::string target_ops = ""; std::string target_plugin_name = ""; void register_plugin(ov::Core& ov_core) noexcept { diff --git a/src/tests/test_utils/common_test_utils/tests/generate_intepus.cpp b/src/tests/test_utils/common_test_utils/tests/generate_inputs.cpp similarity index 79% rename from src/tests/test_utils/common_test_utils/tests/generate_intepus.cpp rename to src/tests/test_utils/common_test_utils/tests/generate_inputs.cpp index ec4727e8ec3234..88c6e122f30e46 100644 --- a/src/tests/test_utils/common_test_utils/tests/generate_intepus.cpp +++ b/src/tests/test_utils/common_test_utils/tests/generate_inputs.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2023 Intel Corporation +// Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -9,7 +9,7 @@ #include "shared_test_classes/base/utils/ranges.hpp" #include "shared_test_classes/base/utils/generate_inputs.hpp" -#include "openvino/op/add.hpp" +#include "openvino/op/concat.hpp" #include "openvino/op/relu.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/result.hpp" @@ -23,7 +23,7 @@ using namespace ov::util; using ov::Shape; using ov::op::v0::Parameter; using ov::op::v0::Result; -using ov::op::v1::Add; +using ov::op::v0::Concat; using ov::op::v0::Relu; using ov::op::v1::ReduceMean; using ov::op::v1::FloorMod; @@ -32,8 +32,8 @@ using ov::op::v1::Reshape; TEST(RangesTests, ranges_by_type_real) { auto p0 = std::make_shared<Parameter>(ov::element::f16, Shape{3}); auto p1 = std::make_shared<Parameter>(ov::element::f16, Shape{3}); - auto add = std::make_shared<Add>(p0, p1); - auto func = std::make_shared<ov::Model>(add, ov::ParameterVector{p0, p1}); + auto concat = std::make_shared<Concat>(ov::OutputVector{p0, p1}, 0); + auto func = std::make_shared<ov::Model>(concat, ov::ParameterVector{p0, p1}); ov::test::utils::ModelRange modelRange; modelRange.find_mode_ranges(func); @@ -49,8 +49,8 @@ ASSERT_EQ(real_range->range, range); ASSERT_EQ(real_range->resolution, 1); - for (size_t port = 0; port < add->get_input_size(); ++port) { - ov::Tensor tensor1 = modelRange.generate_input(add, port, Shape{3}); + for (size_t port = 0; port < concat->get_input_size(); ++port) { + ov::Tensor tensor1 = modelRange.generate_input(concat, port, Shape{3}); auto data1 = tensor1.data<ov::float16>(); for (size_t i = 0; i < shape_size(tensor1.get_shape()); ++i) { double value = data1[i]; @@ -63,8 +63,8 @@ 
TEST(RangesTests, ranges_by_type_int) { ASSERT_EQ(int_range->range, range); ASSERT_EQ(int_range->resolution, 1); - for (size_t port = 0; port < add->get_input_size(); ++port) { - ov::Tensor tensor1 = modelRange.generate_input(add, port, Shape{3}); + for (size_t port = 0; port < concat->get_input_size(); ++port) { + ov::Tensor tensor1 = modelRange.generate_input(concat, port, Shape{3}); auto data1 = tensor1.data(); for (size_t i = 0; i < shape_size(tensor1.get_shape()); ++i) { double value = data1[i]; @@ -92,9 +92,9 @@ TEST(RangesTests, intersection_real) { auto p1 = std::make_shared(ov::element::f32, Shape{3}); auto relu = std::make_shared(p0); - auto add = std::make_shared(p1, relu); + auto concat = std::make_shared(ov::OutputVector{p1, relu}, 0); - auto func = std::make_shared(add, ov::ParameterVector{p0, p1}); + auto func = std::make_shared(concat, ov::ParameterVector{p0, p1}); ov::test::utils::ModelRange modelRange; modelRange.find_mode_ranges(func); @@ -113,18 +113,18 @@ TEST(RangesTests, intersection_real) { ASSERT_LE(value, relu_range_ref.range); } - auto add_range_ref = ov::test::utils::rangeByType.get_range(ov::element::f32); - auto add_range = modelRange.get_range_for_param(p1); - ASSERT_EQ(add_range->start_from, add_range_ref.start_from); - ASSERT_EQ(add_range->range, add_range_ref.range); - ASSERT_EQ(add_range->resolution, add_range_ref.resolution); + auto concat_range_ref = ov::test::utils::rangeByType.get_range(ov::element::f32); + auto concat_range = modelRange.get_range_for_param(p1); + ASSERT_EQ(concat_range->start_from, concat_range_ref.start_from); + ASSERT_EQ(concat_range->range, concat_range_ref.range); + ASSERT_EQ(concat_range->resolution, concat_range_ref.resolution); - ov::Tensor tensor2 = modelRange.generate_input(add, 0, Shape{3}); + ov::Tensor tensor2 = modelRange.generate_input(concat, 0, Shape{3}); auto data2 = tensor1.data(); for (size_t i = 0; i < shape_size(tensor2.get_shape()); ++i) { double value = data2[i]; - ASSERT_GE(value, add_range_ref.start_from); - ASSERT_LE(value, add_range_ref.range); + ASSERT_GE(value, concat_range_ref.start_from); + ASSERT_LE(value, concat_range_ref.range); } } @@ -133,9 +133,9 @@ TEST(RangesTests, intersection_integral) { auto p1 = std::make_shared(ov::element::i32, Shape{3}); auto relu = std::make_shared(p0); - auto add = std::make_shared(p1, relu); + auto concat = std::make_shared(ov::OutputVector{p1, relu}, 0); - auto func = std::make_shared(add, ov::ParameterVector{p0, p1}); + auto func = std::make_shared(concat, ov::ParameterVector{p0, p1}); ov::test::utils::ModelRange modelRange; modelRange.find_mode_ranges(func); @@ -154,18 +154,18 @@ TEST(RangesTests, intersection_integral) { ASSERT_LE(value, relu_range_ref.range); } - auto add_range_ref = ov::test::utils::rangeByType.get_range(ov::element::f32); - auto add_range = modelRange.get_range_for_param(p1); - ASSERT_EQ(add_range->start_from, add_range_ref.start_from); - ASSERT_EQ(add_range->range, add_range_ref.range); - ASSERT_EQ(add_range->resolution, add_range_ref.resolution); + auto concat_range_ref = ov::test::utils::rangeByType.get_range(ov::element::f32); + auto concat_range = modelRange.get_range_for_param(p1); + ASSERT_EQ(concat_range->start_from, concat_range_ref.start_from); + ASSERT_EQ(concat_range->range, concat_range_ref.range); + ASSERT_EQ(concat_range->resolution, concat_range_ref.resolution); - ov::Tensor tensor2 = modelRange.generate_input(add, 0, Shape{3}); + ov::Tensor tensor2 = modelRange.generate_input(concat, 0, Shape{3}); auto data2 = tensor1.data(); 
for (size_t i = 0; i < shape_size(tensor2.get_shape()); ++i) { double value = data2[i]; - ASSERT_GE(value, add_range_ref.start_from); - ASSERT_LE(value, add_range_ref.range); + ASSERT_GE(value, concat_range_ref.start_from); + ASSERT_LE(value, concat_range_ref.range); } } @@ -177,9 +177,9 @@ TEST(RangesTests, spetial_ranges) { auto p2 = std::make_shared(ov::element::i32, Shape{1}); p2->set_friendly_name("p2"); - auto add = std::make_shared(p0, p1); - add->set_friendly_name("add"); - auto reshape = std::make_shared(add, p2, true); + auto concat = std::make_shared(ov::OutputVector{p0, p1}, 1); + concat->set_friendly_name("Concat"); + auto reshape = std::make_shared(concat, p2, true); reshape->set_friendly_name("reshape"); auto res = std::make_shared(reshape); @@ -195,7 +195,7 @@ TEST(RangesTests, spetial_ranges) { ASSERT_EQ(real_range->range, main_range.range); ASSERT_EQ(real_range->resolution, main_range.resolution); - ov::Tensor tensor1 = modelRange.generate_input(add, 0, Shape{1, 2, 3}); + ov::Tensor tensor1 = modelRange.generate_input(concat, 0, Shape{1, 2, 3}); auto data1 = tensor1.data(); for (size_t i = 0; i < shape_size(tensor1.get_shape()); ++i) { double value = data1[i]; @@ -224,8 +224,8 @@ TEST(RangesTests, intersection_range) { auto p2 = std::make_shared(ov::element::i32, Shape{1}); auto relu = std::make_shared(p0); - auto add = std::make_shared(p1, relu); - auto reduce = std::make_shared(add, p2, true); + auto concat = std::make_shared(ov::OutputVector{p1, relu}, 1); + auto reduce = std::make_shared(concat, p2, true); auto func = std::make_shared(reduce, ov::ParameterVector{p0, p1, p2}); diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv index 2dc390ed536fc8..857d92d1ddd718 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv @@ -246,4 +246,6 @@ conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR= conformance_Loop/ReadIRTest.Inference/Op=Loop.5_Type=f32_Shape=static_IR=35c61b2251b78ad9f9804bd3f9e301e1f974c6dc138ce0466b8b940d106ddd72_Device=CPU_Config=(),9.92895e-06 conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7caba2dff8ab10660f66796a39d8d2a78f3e282f0629c2ecbee9b90c34e62aa0_Device=CPU_Config=(),2.1896e-06 conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=2e06088cb191d8d26309843b1285b9ae4a1eb0722e1370875edde7fd2783851b_Device=CPU_Config=(),1.88776e-06 -conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=183e5203c7008618a9cfb2680265bb3f588f80c2493bf7fac92eb258e66da2cf_Device=CPU_Config=(),1.88776e-06 \ No newline at end of file +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=183e5203c7008618a9cfb2680265bb3f588f80c2493bf7fac92eb258e66da2cf_Device=CPU_Config=(),1.88776e-06 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d9771ac46751569172412bbd4495eccdbac435f78a97f8fdfffa9215faa74544_Device=CPU_Config=(),1.88776e-06 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9b4725171957a420a98f908742f18062fbcee198871d527ab5b4d939005ac4e6_Device=CPU_Config=(),0.00116845 \ No newline at end of file
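
For reference, a minimal sketch (not part of this patch) of how a single test can pin its own input range through the ov::test::utils::InputGenerateData knobs the change relies on, similar to the TensorIteratorTest::generate_inputs override above; the helper name make_demo_input is hypothetical:

#include "common_test_utils/ov_tensor_utils.hpp"

#include "openvino/core/shape.hpp"
#include "openvino/runtime/tensor.hpp"

// Hypothetical helper: fills one f32 tensor with values in [start_from, start_from + range],
// mirroring the InputGenerateData usage introduced for TensorIteratorTest in this patch.
static ov::Tensor make_demo_input(const ov::Shape& shape) {
    ov::test::utils::InputGenerateData in_data;
    in_data.start_from = 0;   // lowest generated value
    in_data.range = 8;        // width of the generated interval
    in_data.resolution = 32;  // fractional granularity, mainly relevant for real element types
    return ov::test::utils::create_and_fill_tensor(ov::element::f32, shape, in_data);
}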