diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index ab2933bd6f7910..8ea9782838e9ff 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -26,7 +26,7 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { if (dtype0.is() && dtype1.is()) { // aten::add.t(t[] a, t[] b) -> t[] // Case when two lists gets concatenated - FRONT_END_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); + PYTORCH_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); } if (inplace) { if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) diff --git a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp index 8a6f05b9ba689d..f7b8b409db4ef7 100644 --- a/src/frontends/pytorch/src/op/arange.cpp +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -60,7 +60,7 @@ OutputVector translate_arange(const NodeContext& context) { dtype_port = 3; dtype_applied = true; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); + PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); } if (dtype_port >= 0 && !context.input_is_none(dtype_port)) { if (std::dynamic_pointer_cast( @@ -72,7 +72,7 @@ OutputVector translate_arange(const NodeContext& context) { out_tensor = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto range = context.mark_node(std::make_shared(start, end, step, dtype)); @@ -130,7 +130,7 @@ OutputVector translate_arange_fx(const NodeContext& context) { dtype_port = 3; dtype_applied = true; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); + PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); } if (dtype_port >= 0 && !context.input_is_none(dtype_port)) { if (std::dynamic_pointer_cast( @@ -142,7 +142,7 @@ OutputVector translate_arange_fx(const NodeContext& context) { out_tensor = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto r_end = context.mark_node(std::make_shared(end, dtype)); diff --git a/src/frontends/pytorch/src/op/as_strided.cpp b/src/frontends/pytorch/src/op/as_strided.cpp index 5d1dfe38bdaa17..6bcaed8bfd49e3 100644 --- a/src/frontends/pytorch/src/op/as_strided.cpp +++ b/src/frontends/pytorch/src/op/as_strided.cpp @@ -32,8 +32,8 @@ OutputVector translate_as_strided(const NodeContext& context) { auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); auto input_strides = decoder->get_input_strides(0); - FRONT_END_OP_CONVERSION_CHECK(input_strides.size() != 0, - "aten::as_strided: Couldn't retrive input stride information from torchscript."); + PYTORCH_OP_CONVERSION_CHECK(input_strides.size() != 0, + "aten::as_strided: Couldn't retrive input stride information from torchscript."); std::vector idxs(input_strides.size()); iota(idxs.begin(), idxs.end(), 0); @@ -77,8 +77,8 @@ OutputVector translate_as_strided(const NodeContext& context) { if 
(!context.input_is_none(3)) { offset = context.get_input(3); } - FRONT_END_OP_CONVERSION_CHECK(sizes.size() == strides.size(), - "aten::as_strided: Vector for strides and sizes need to have equal length."); + PYTORCH_OP_CONVERSION_CHECK(sizes.size() == strides.size(), + "aten::as_strided: Vector for strides and sizes need to have equal length."); auto strides_size = strides.size() - 1; auto i = 0; auto strides_length_const = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {strides.size()})); diff --git a/src/frontends/pytorch/src/op/as_tensor.cpp b/src/frontends/pytorch/src/op/as_tensor.cpp index 93c4a647fb3ce7..6a97af6671303f 100644 --- a/src/frontends/pytorch/src/op/as_tensor.cpp +++ b/src/frontends/pytorch/src/op/as_tensor.cpp @@ -55,7 +55,7 @@ OutputVector translate_as_tensor(const NodeContext& context) { return {context.mark_node(std::make_shared(OutputVector(list_elems.begin(), list_elems.end()), 0))}; } else { // Input is already a tensor - FRONT_END_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor."); + PYTORCH_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor."); return {list_elems[0]}; } }; diff --git a/src/frontends/pytorch/src/op/avg_poolnd.cpp b/src/frontends/pytorch/src/op/avg_poolnd.cpp index e497256f1c6205..4a90db23a67c1e 100644 --- a/src/frontends/pytorch/src/op/avg_poolnd.cpp +++ b/src/frontends/pytorch/src/op/avg_poolnd.cpp @@ -45,8 +45,8 @@ OutputVector translate_avg_poolnd(const NodeContext& context) { if (!(context.input_is_none(5))) { count_include_pad = context.const_input(5); } - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(6), - "Translation for aten::avg_pool2d do not support divisor_override input."); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(6), + "Translation for aten::avg_pool2d do not support divisor_override input."); // Although ov::AvgPool provides exclude_pad=false, // The corner case of Average Pooling with ceil_mode on // PyTorch allows sliding window go off bound, which leads to this accommodation. diff --git a/src/frontends/pytorch/src/op/batch_norm.cpp b/src/frontends/pytorch/src/op/batch_norm.cpp index 5d10222eceb890..092e95ff38a290 100644 --- a/src/frontends/pytorch/src/op/batch_norm.cpp +++ b/src/frontends/pytorch/src/op/batch_norm.cpp @@ -139,8 +139,8 @@ OutputVector translate_batch_norm_legit_no_stats_fx(const NodeContext& context) bias = context.get_input(2); } auto training = context.const_input(3); - FRONT_END_OP_CONVERSION_CHECK(training, - "aten._native_batch_norm_legit.no_stats can only be used when training=True."); + PYTORCH_OP_CONVERSION_CHECK(training, + "aten._native_batch_norm_legit.no_stats can only be used when training=True."); // index 4 momentum is used during training only auto eps = context.const_input(5); auto output = make_batch_norm(context, context.get_input(0), weight, bias, {}, {}, eps); diff --git a/src/frontends/pytorch/src/op/cat.cpp b/src/frontends/pytorch/src/op/cat.cpp index 9476979a118bd7..5b873193157cda 100644 --- a/src/frontends/pytorch/src/op/cat.cpp +++ b/src/frontends/pytorch/src/op/cat.cpp @@ -35,7 +35,7 @@ OutputVector translate_cat_common(const NodeContext& context, return {context.mark_node(fw_node)}; } auto first_node = list_elems.front().get_node_shared_ptr(); - FRONT_END_OP_CONVERSION_CHECK( + PYTORCH_OP_CONVERSION_CHECK( list_elems.size() > 1 || !ov::as_type_ptr(first_node), "::cat is located inside body while inputs are located outside of the body. 
" "This case is not supported."); @@ -86,7 +86,7 @@ OutputVector translate_quantized_cat(const NodeContext& context) { num_inputs_check(context, 4, 4); const auto&& list_elems = get_list_as_outputs(context.get_input(0)); auto axis = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation."); + PYTORCH_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation."); return {quantize(context, translate_cat_common(context, list_elems, axis, false)[0], context.get_input(2), diff --git a/src/frontends/pytorch/src/op/conv_transposend.cpp b/src/frontends/pytorch/src/op/conv_transposend.cpp index 1f281f90486fad..079df5703e08ca 100644 --- a/src/frontends/pytorch/src/op/conv_transposend.cpp +++ b/src/frontends/pytorch/src/op/conv_transposend.cpp @@ -24,7 +24,7 @@ OutputVector translate_conv_transposend(const NodeContext& context) { auto pad_type = ov::op::PadType::EXPLICIT; auto dilations = context.const_input(7); auto groups = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1"); + PYTORCH_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1"); std::shared_ptr conv; if (groups == 1) { diff --git a/src/frontends/pytorch/src/op/elu.cpp b/src/frontends/pytorch/src/op/elu.cpp index 4f96371ee83ebd..fee33345436e1c 100644 --- a/src/frontends/pytorch/src/op/elu.cpp +++ b/src/frontends/pytorch/src/op/elu.cpp @@ -18,10 +18,10 @@ OutputVector translate_elu(const NodeContext& context) { auto x = context.get_input(0); auto alpha = context.const_input(1); // TODO: Figure out what scale and input_scale do - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == 1, - "Unexpected value of scale input for elu operation"); - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input(3) == 1, - "Unexpected value of input_scale input for elu operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == 1, + "Unexpected value of scale input for elu operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input(3) == 1, + "Unexpected value of input_scale input for elu operation"); return {context.mark_node(std::make_shared(x, alpha))}; }; diff --git a/src/frontends/pytorch/src/op/embedding_bag.cpp b/src/frontends/pytorch/src/op/embedding_bag.cpp index ee1cba3d1cff08..4560ea2a09db4f 100644 --- a/src/frontends/pytorch/src/op/embedding_bag.cpp +++ b/src/frontends/pytorch/src/op/embedding_bag.cpp @@ -21,7 +21,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) { num_inputs_check(context, 9, 9); // we have only EmbeddingBagSum case support, check it before translation auto mode = context.const_input(4); - FRONT_END_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation"); + PYTORCH_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation"); auto weight = context.get_input(0); auto indices = context.get_input(1); indices = context.mark_node(std::make_shared(indices, element::i32)); @@ -44,7 +44,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) { auto offsets = context.get_input(2); offsets = context.mark_node(std::make_shared(offsets, element::i32)); auto include_last_offset = context.const_input(7); - FRONT_END_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is 
not supported"); + PYTORCH_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is not supported"); // no per_sample_wights if (context.input_is_none(6)) { result = context.mark_node(std::make_shared(weight, indices, offsets)); diff --git a/src/frontends/pytorch/src/op/expand.cpp b/src/frontends/pytorch/src/op/expand.cpp index 7fcb7a898a48bc..2966436355a757 100644 --- a/src/frontends/pytorch/src/op/expand.cpp +++ b/src/frontends/pytorch/src/op/expand.cpp @@ -28,8 +28,8 @@ OutputVector translate_expand(const NodeContext& context) { auto x = context.get_input(0); auto sizes = context.get_input(1); // TODO: figure out what implicit means - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, - "Unexpected value of implicit for expand operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, + "Unexpected value of implicit for expand operation"); return base_expand(context, x, sizes); }; @@ -54,8 +54,8 @@ OutputVector translate_expand_fx(const NodeContext& context) { } auto sizes = context.get_input(1); // TODO: figure out what implicit means - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, - "Unexpected value of implicit for expand operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, + "Unexpected value of implicit for expand operation"); return base_expand(context, x, sizes); }; diff --git a/src/frontends/pytorch/src/op/eye.cpp b/src/frontends/pytorch/src/op/eye.cpp index 9b7f7ef8c3bc29..2a4be73a6ef500 100644 --- a/src/frontends/pytorch/src/op/eye.cpp +++ b/src/frontends/pytorch/src/op/eye.cpp @@ -36,7 +36,7 @@ OutputVector translate_eye(const NodeContext& context) { y = context.mark_node(std::make_shared(y, element::i32)); dtype_id = 2; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye"); } if (!context.input_is_none(dtype_id)) { dtype = convert_dtype(context.const_input(dtype_id)); diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index defcbab7095089..b87ec7867c96aa 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -264,7 +264,7 @@ OutputVector translate_fill_diagonal(const NodeContext& context) { auto const_zero_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto const_neg_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); if (input_rank.is_dynamic() || input_rank.get_length() < 2) { - FRONT_END_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 "); + PYTORCH_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 "); } auto flatten_input = context.mark_node(std::make_shared(input_tensor, const_neg_one, false)); auto wrap = context.const_input(2); diff --git a/src/frontends/pytorch/src/op/gelu.cpp b/src/frontends/pytorch/src/op/gelu.cpp index 64afb511b31dc3..ac38e41b93a0ed 100644 --- a/src/frontends/pytorch/src/op/gelu.cpp +++ b/src/frontends/pytorch/src/op/gelu.cpp @@ -21,7 +21,7 @@ OutputVector translate_gelu_common(const NodeContext& context, const std::string if (approximate == "tanh") { return {context.mark_node(std::make_shared(x, ov::op::GeluApproximationMode::TANH))}; } - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported 
approximate for Gelu: ", approximate); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported approximate for Gelu: ", approximate); }; } // namespace diff --git a/src/frontends/pytorch/src/op/get_attr.cpp b/src/frontends/pytorch/src/op/get_attr.cpp index 58bc63e60a700e..c28da30cf17ef3 100644 --- a/src/frontends/pytorch/src/op/get_attr.cpp +++ b/src/frontends/pytorch/src/op/get_attr.cpp @@ -13,9 +13,9 @@ namespace op { OutputVector translate_get_attr(const NodeContext& context) { auto res = context.get_decoder()->try_decode_get_attr(); - FRONT_END_OP_CONVERSION_CHECK(res.size() > 0, - "Failed to obtain data from GetAttr with output tensor name: ", - context.get_decoder()->get_output_debug_name(0)); + PYTORCH_OP_CONVERSION_CHECK(res.size() > 0, + "Failed to obtain data from GetAttr with output tensor name: ", + context.get_decoder()->get_output_debug_name(0)); if (res.size() == 1) { auto node = res[0].get_node(); if (node->get_friendly_name() != node->get_name()) { diff --git a/src/frontends/pytorch/src/op/getitem.cpp b/src/frontends/pytorch/src/op/getitem.cpp index 58d3639cc8aa92..1aab3e765d237f 100644 --- a/src/frontends/pytorch/src/op/getitem.cpp +++ b/src/frontends/pytorch/src/op/getitem.cpp @@ -20,29 +20,29 @@ OutputVector translate_getitem(const NodeContext& context) { num_inputs_check(context, 2, 2); auto input = context.get_input(0); const auto idx_type = context.get_input_type(1); - FRONT_END_OP_CONVERSION_CHECK(!idx_type.is(), - "String index in aten::__getitem__ means dict input, this is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!idx_type.is(), + "String index in aten::__getitem__ means dict input, this is not supported."); if (ov::as_type_ptr(input.get_node_shared_ptr())) { - FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"), - "special case for aten::__getitem__"); - FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"), - "special case for aten::__getitem__"); + PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"), + "special case for aten::__getitem__"); + PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"), + "special case for aten::__getitem__"); const auto&& list_elems = get_list_as_outputs(input); auto getitem_idx = context.const_input(1); if (getitem_idx < 0) { getitem_idx += list_elems.size(); } - FRONT_END_OP_CONVERSION_CHECK(getitem_idx < static_cast(list_elems.size()), - "Index: ", - getitem_idx, - " is out of bounds of input list of len: ", - list_elems.size()); + PYTORCH_OP_CONVERSION_CHECK(getitem_idx < static_cast(list_elems.size()), + "Index: ", + getitem_idx, + " is out of bounds of input list of len: ", + list_elems.size()); return {list_elems.at(getitem_idx)}; } if (ov::as_type_ptr(input.get_node_shared_ptr())) { const auto& outside_input_node = context.get_input_from_visible_context(0).get_node_shared_ptr(); - FRONT_END_OP_CONVERSION_CHECK(!ov::as_type_ptr(outside_input_node), - "Unsupported case: aten::__getitem__ is inside the body, and input is Loop."); + PYTORCH_OP_CONVERSION_CHECK(!ov::as_type_ptr(outside_input_node), + "Unsupported case: aten::__getitem__ is inside the body, and input is Loop."); } auto getitem_idx = context.get_input(1); auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); diff --git a/src/frontends/pytorch/src/op/grid_sampler.cpp b/src/frontends/pytorch/src/op/grid_sampler.cpp index 8c603813d888f7..d9b268d7aa4b3e 100644 --- 
a/src/frontends/pytorch/src/op/grid_sampler.cpp +++ b/src/frontends/pytorch/src/op/grid_sampler.cpp @@ -28,12 +28,12 @@ OutputVector translate_grid_sampler(const NodeContext& context) { {1, v9::GridSample::PaddingMode::BORDER}, {2, v9::GridSample::PaddingMode::REFLECTION}}; auto mode = context.const_input(2); - FRONT_END_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode); + PYTORCH_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode); attrs.mode = grid_sample_mode_map.at(mode); auto padding_mode = context.const_input(3); - FRONT_END_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode), - "Unknown padding mode: ", - padding_mode); + PYTORCH_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode), + "Unknown padding mode: ", + padding_mode); attrs.padding_mode = grid_sample_padding_mode_map.at(padding_mode); bool align_corners = false; if (!context.input_is_none(4)) { diff --git a/src/frontends/pytorch/src/op/if.cpp b/src/frontends/pytorch/src/op/if.cpp index 15d1c5e24c1873..5b5e07ab1bb7da 100644 --- a/src/frontends/pytorch/src/op/if.cpp +++ b/src/frontends/pytorch/src/op/if.cpp @@ -51,7 +51,7 @@ OutputVector translate_if(const NodeContext& context) { auto if_node = std::make_shared(context.get_input(0)); context.mark_node(if_node); auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 2, "If must have 2 subgraphs."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 2, "If must have 2 subgraphs."); auto then_decoder = decoder->get_subgraph_decoder(0); auto then_body = context.convert_subgraph(0); @@ -72,13 +72,13 @@ OutputVector translate_if(const NodeContext& context) { auto session = context.get_session(); for (const auto& param : then_body->get_parameters()) { auto input_idx = session->decode_tensor_name(param->output(0)); - FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0, - "More than one then_body input with same tensor name: ", - input_idx, - "; existing: ", - inputs_map.at(input_idx)[0], - " adding: ", - param); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0, + "More than one then_body input with same tensor name: ", + input_idx, + "; existing: ", + inputs_map.at(input_idx)[0], + " adding: ", + param); inputs_map[input_idx] = {param, nullptr}; } for (const auto& param : else_body->get_parameters()) { @@ -93,8 +93,8 @@ OutputVector translate_if(const NodeContext& context) { const auto num_outs = context.get_output_size(); const auto then_results = then_body->get_results(); const auto else_results = else_body->get_results(); - FRONT_END_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs, - "Else or then body have less outputs than prim::If requires."); + PYTORCH_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs, + "Else or then body have less outputs than prim::If requires."); for (size_t i = 0; i < num_outs; i++) { align_result_types(context, then_results[i], else_results[i]); res.push_back(if_node->set_output(then_results[i], else_results[i])); @@ -106,26 +106,26 @@ OutputVector translate_if(const NodeContext& context) { for (size_t i = num_outs; i < then_results.size(); i++) { const auto result = then_results[i]; auto output_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0, - "More than one then_body output with 
same tensor name: ", - output_idx, - "; existing: ", - extra_then_body_results.at(output_idx), - " adding: ", - result); + PYTORCH_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0, + "More than one then_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_then_body_results.at(output_idx), + " adding: ", + result); extra_then_body_results[output_idx] = result; extra_output_idxs.insert(output_idx); } for (size_t i = num_outs; i < else_results.size(); i++) { const auto result = else_results[i]; auto output_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0, - "More than one else_body output with same tensor name: ", - output_idx, - "; existing: ", - extra_else_body_results.at(output_idx), - " adding: ", - result); + PYTORCH_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0, + "More than one else_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_else_body_results.at(output_idx), + " adding: ", + result); extra_else_body_results[output_idx] = result; extra_output_idxs.insert(output_idx); } @@ -140,7 +140,7 @@ OutputVector translate_if(const NodeContext& context) { then_body->add_parameters({new_parameter}); then_body->add_results({new_result}); then_body->validate_nodes_and_infer_types(); - FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body: ", output_idx); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body: ", output_idx); inputs_map[output_idx][0] = new_parameter; extra_then_body_results[output_idx] = new_result; OPENVINO_DEBUG << "Modified then body: " << if_node << '\n'; @@ -152,7 +152,7 @@ OutputVector translate_if(const NodeContext& context) { else_body->add_parameters({new_parameter}); else_body->add_results({new_result}); else_body->validate_nodes_and_infer_types(); - FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body: ", output_idx); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body: ", output_idx); inputs_map[output_idx][1] = new_parameter; extra_else_body_results[output_idx] = new_result; OPENVINO_DEBUG << "Modified else body: " << if_node << '\n'; diff --git a/src/frontends/pytorch/src/op/im2col.cpp b/src/frontends/pytorch/src/op/im2col.cpp index 718e0eadaa4ca0..56545bc3270ff6 100644 --- a/src/frontends/pytorch/src/op/im2col.cpp +++ b/src/frontends/pytorch/src/op/im2col.cpp @@ -60,13 +60,13 @@ OutputVector translate_im2col(const NodeContext& context) { num_inputs_check(context, 5, 5); auto input = context.get_input(0); auto kernel_size = context.const_input>(1); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "kernel size should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "kernel size should contains 2 elements"); auto dilation = context.const_input>(2); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "dilation should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "dilation should contains 2 elements"); auto padding = context.const_input>(3); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "padding should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "padding should contains 2 elements"); auto stride = context.const_input>(4); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "stride should contains 2 
elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "stride should contains 2 elements"); auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto input_shape = context.mark_node(std::make_shared(input, element::i32)); auto zero_f = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); diff --git a/src/frontends/pytorch/src/op/index.cpp b/src/frontends/pytorch/src/op/index.cpp index a9aaf62257d71b..6030dd557deae3 100644 --- a/src/frontends/pytorch/src/op/index.cpp +++ b/src/frontends/pytorch/src/op/index.cpp @@ -224,7 +224,7 @@ OutputVector translate_index(const NodeContext& context) { ov::pass::NodeRegistry rg; auto rank = x.get_partial_shape().rank(); // index transformation supports only tensors with static rank - FRONT_END_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); + PYTORCH_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); auto res = index_on_list(rg, x, list_elems, rank.get_length()); context.mark_nodes(rg.get()); return res; @@ -266,7 +266,7 @@ OutputVector translate_index_fx(const NodeContext& context) { rank = context.get_decoder()->get_input_shape(0).rank(); } // index transformation supports only tensors with static rank - FRONT_END_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); + PYTORCH_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); auto res = index_on_list(rg, x, list_elems, rank.get_length()); context.mark_nodes(rg.get()); return res; diff --git a/src/frontends/pytorch/src/op/layer_norm.cpp b/src/frontends/pytorch/src/op/layer_norm.cpp index 974106e3aabe5d..9bcdd0c1bdd6f3 100644 --- a/src/frontends/pytorch/src/op/layer_norm.cpp +++ b/src/frontends/pytorch/src/op/layer_norm.cpp @@ -21,9 +21,9 @@ OutputVector translate_layer_norm(const NodeContext& context) { num_inputs_check(context, 5, 6); auto eps = context.const_input(4); auto normalized_shape = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(normalized_shape.size() == 1, - "Translation for aten::layer_norm supports only single normalized_shape value, " - "which means normalizing over the last dimension."); + PYTORCH_OP_CONVERSION_CHECK(normalized_shape.size() == 1, + "Translation for aten::layer_norm supports only single normalized_shape value, " + "which means normalizing over the last dimension."); // TODO: support any dimension auto axes = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); auto out_node = diff --git a/src/frontends/pytorch/src/op/linspace.cpp b/src/frontends/pytorch/src/op/linspace.cpp index c2233bee15ee24..5bdf489d297566 100644 --- a/src/frontends/pytorch/src/op/linspace.cpp +++ b/src/frontends/pytorch/src/op/linspace.cpp @@ -44,7 +44,7 @@ OutputVector translate_linspace(const NodeContext& context) { out_tensor = fw_node->input_value(0); apply_dtype = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } else if (!context.input_is_none(3) && context.get_input_size() == 4) { // Case where dtype is inherited from out tensor. 
diff --git a/src/frontends/pytorch/src/op/list_construct.cpp b/src/frontends/pytorch/src/op/list_construct.cpp index e58a3c4744ff61..cc17aae928e37d 100644 --- a/src/frontends/pytorch/src/op/list_construct.cpp +++ b/src/frontends/pytorch/src/op/list_construct.cpp @@ -22,7 +22,7 @@ OutputVector translate_list_construct(const NodeContext& context) { for (size_t i = 0; i < context.get_input_size(); i++) { auto input = context.get_input_from_visible_context(i); auto c_node = std::dynamic_pointer_cast(input.get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); + PYTORCH_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); if (c_node->get_shape().size() == 0) { c_node = std::make_shared(c_node->get_element_type(), Shape{1}, c_node->get_data_ptr()); consts.push_back(c_node); diff --git a/src/frontends/pytorch/src/op/list_unpack.cpp b/src/frontends/pytorch/src/op/list_unpack.cpp index 3a960f842352ed..30fe2d2ed5e8bd 100644 --- a/src/frontends/pytorch/src/op/list_unpack.cpp +++ b/src/frontends/pytorch/src/op/list_unpack.cpp @@ -31,7 +31,7 @@ OutputVector translate_list_unpack(const NodeContext& context) { } return res; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported operation type."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported operation type."); } } else { return outputs; diff --git a/src/frontends/pytorch/src/op/loop.cpp b/src/frontends/pytorch/src/op/loop.cpp index 99c59b46f342a1..4aaf56b081f5b4 100644 --- a/src/frontends/pytorch/src/op/loop.cpp +++ b/src/frontends/pytorch/src/op/loop.cpp @@ -15,10 +15,10 @@ namespace op { OutputVector translate_loop(const NodeContext& context) { const auto& inputs = context.inputs(); - FRONT_END_OP_CONVERSION_CHECK(inputs.size() >= 2, "Loop must have at least 2 inputs."); + PYTORCH_OP_CONVERSION_CHECK(inputs.size() >= 2, "Loop must have at least 2 inputs."); auto loop = std::make_shared(inputs[0], inputs[1]); auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, "Loop must have 1 subgraph."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, "Loop must have 1 subgraph."); auto subgraph_decoder = decoder->get_subgraph_decoder(0); auto body = context.convert_subgraph(0); loop->set_function(body); @@ -28,20 +28,20 @@ OutputVector translate_loop(const NodeContext& context) { // process outputs first auto session = context.get_session(); auto body_results = body->get_results(); - FRONT_END_OP_CONVERSION_CHECK(body_results.size() > 0, "At least one output from loop is required - condition."); + PYTORCH_OP_CONVERSION_CHECK(body_results.size() > 0, "At least one output from loop is required - condition."); std::map> output_idxs; // 0 output is condition, do not need to connect it for (size_t i = 1; i < body_results.size(); i++) { auto result = body_results[i]; auto out_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(output_idxs.count(out_idx) == 0, - "More then one body output with same tensor name."); + PYTORCH_OP_CONVERSION_CHECK(output_idxs.count(out_idx) == 0, + "More then one body output with same tensor name."); output_idxs[out_idx] = result; } auto body_parameters = body->get_parameters(); // #0 body parameter is counter; - FRONT_END_OP_CONVERSION_CHECK(body_parameters.size() > 0, "At least one input to Loop body is required"); + PYTORCH_OP_CONVERSION_CHECK(body_parameters.size() > 0, "At least one input 
to Loop body is required"); // Set counter type and shape body_parameters[0]->set_element_type(element::i32); body_parameters[0]->set_partial_shape(PartialShape{}); diff --git a/src/frontends/pytorch/src/op/lstm.cpp b/src/frontends/pytorch/src/op/lstm.cpp index 0ea42e8bfa1799..2f8af3d4759b6e 100644 --- a/src/frontends/pytorch/src/op/lstm.cpp +++ b/src/frontends/pytorch/src/op/lstm.cpp @@ -94,12 +94,12 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg, bidirectional ? RecurrentSequenceDirection::BIDIRECTIONAL : RecurrentSequenceDirection::FORWARD; int64_t weights_per_layer = has_biases ? 4 : 2; int64_t mult = bidirectional ? 2 : 1; - FRONT_END_OP_CONVERSION_CHECK(static_cast(all_weights.size()) == num_layers * weights_per_layer * mult, - "Unexpected length of list with weights for rnn operation."); + PYTORCH_OP_CONVERSION_CHECK(static_cast(all_weights.size()) == num_layers * weights_per_layer * mult, + "Unexpected length of list with weights for rnn operation."); const auto w_hh = all_weights[1]; const auto w_hh_pshape = w_hh.get_partial_shape(); - FRONT_END_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), ""); + PYTORCH_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), ""); const auto hidden_size = w_hh_pshape[1].get_length(); const auto zero = v0::Constant::create(element::i32, Shape{}, {0}); @@ -118,7 +118,7 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg, h0 = initial_states[0]; c0 = initial_states[1]; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); } Output prev_output = input; @@ -257,7 +257,7 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg, c_res = rg.make(c_res, order_102); return {prev_output, h_res, c_res}; } - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); } } // namespace @@ -267,7 +267,7 @@ OutputVector translate_lstm(const NodeContext& context) { ov::pass::NodeRegistry rg; if (context.get_input_type(3).is()) { // lstm packed - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported lstm variant."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported lstm variant."); } else { // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, // bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) @@ -278,7 +278,7 @@ OutputVector translate_lstm(const NodeContext& context) { const auto num_layers = context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); @@ -310,7 +310,7 @@ OutputVector translate_gru(const NodeContext& context) { const auto num_layers = context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "GRU in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "GRU in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); @@ -340,13 +340,13 @@ OutputVector translate_rnn(const NodeContext& context) { const auto num_layers = 
context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "RNN in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "RNN in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); const auto weight = get_list_as_outputs(weight_v); const auto variant_it = RNN_VARIANT_MAP.find(context.get_op_type()); - FRONT_END_OP_CONVERSION_CHECK(variant_it != RNN_VARIANT_MAP.end(), "Unsupported RNN variant."); + PYTORCH_OP_CONVERSION_CHECK(variant_it != RNN_VARIANT_MAP.end(), "Unsupported RNN variant."); const auto res = generic_rnn(rg, variant_it->second, input, diff --git a/src/frontends/pytorch/src/op/multinomial.cpp b/src/frontends/pytorch/src/op/multinomial.cpp index c359c34bbcb657..bba2b045063f47 100644 --- a/src/frontends/pytorch/src/op/multinomial.cpp +++ b/src/frontends/pytorch/src/op/multinomial.cpp @@ -27,8 +27,8 @@ OutputVector translate_multinomial(const NodeContext& context) { auto input = context.get_input(0); auto num_samples = context.mark_node(std::make_shared(context.get_input(1), const_1, false)); auto replacement = context.const_input(2); - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3), - "aten::multinomial conversion with generator is not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3), + "aten::multinomial conversion with generator is not supported"); // Torch multinomial accept input of [class_probs] or [bs, class_probs], convert always to [bs, class_probs] for OV. auto input_shape = context.mark_node(std::make_shared(input, element::i32)); diff --git a/src/frontends/pytorch/src/op/native_multi_head_attention.cpp b/src/frontends/pytorch/src/op/native_multi_head_attention.cpp index 6ecc798b439394..e70fae3976d7b8 100644 --- a/src/frontends/pytorch/src/op/native_multi_head_attention.cpp +++ b/src/frontends/pytorch/src/op/native_multi_head_attention.cpp @@ -132,7 +132,7 @@ OutputVector translate_native_multi_head_attention(const NodeContext& context) { } else { // Once int/float mask type is supported in PyTorch, // remove this assert to allow for such masks in OV - FRONT_END_OP_CONVERSION_CHECK(1, "Non-boolean masks are not supported."); + PYTORCH_OP_CONVERSION_CHECK(1, "Non-boolean masks are not supported."); atten_mask = context.mark_node(std::make_shared(atten_mask, scaled_dot_product)); } diff --git a/src/frontends/pytorch/src/op/norm.cpp b/src/frontends/pytorch/src/op/norm.cpp index cd0311972dd0dd..73ec824ddd6059 100644 --- a/src/frontends/pytorch/src/op/norm.cpp +++ b/src/frontends/pytorch/src/op/norm.cpp @@ -53,8 +53,8 @@ Output norm_vector(const NodeContext& context, res = context.mark_node(std::make_shared(abs, dim, keep_dim)); } else if (p == 0) { auto input_rank = input_tensor.get_partial_shape().rank(); - FRONT_END_OP_CONVERSION_CHECK(input_rank.is_dynamic() || input_rank.get_length() == 1, - "ord=0 supported only for vector norm"); + PYTORCH_OP_CONVERSION_CHECK(input_rank.is_dynamic() || input_rank.get_length() == 1, + "ord=0 supported only for vector norm"); auto zero = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); zero = context.mark_node(std::make_shared(zero, input_tensor)); auto cond = context.mark_node(std::make_shared(input_tensor, zero)); @@ -100,7 +100,7 @@ Output norm_matrix(const NodeContext& context, auto sum = context.mark_node(std::make_shared(abs, first_dim, true)); res = 
context.mark_node(std::make_shared(sum, second_dim, true)); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p, " for matrix norm"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p, " for matrix norm"); } if (!keep_dim) { res = context.mark_node(std::make_shared(res, dim)); @@ -139,7 +139,7 @@ OutputVector translate_norm(const NodeContext& context) { if (p_str == "fro") { res = frobenius_norm(context, input_tensor, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); @@ -230,7 +230,7 @@ OutputVector translate_linalg_matrix_norm(const NodeContext& context) { if (p_str == "fro") { result = frobenius_norm(context, x, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); @@ -272,8 +272,7 @@ OutputVector translate_linalg_norm(const NodeContext& context) { } else if (input_rank.is_dynamic() || input_rank.get_length() == 1) { result = norm_vector(context, x, dim, 2, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, - "linalg norm for tensor rank > 2 without ord specification unsupported"); + PYTORCH_OP_CONVERSION_CHECK(false, "linalg norm for tensor rank > 2 without ord specification unsupported"); } } else { // ord defines the norm that is computed can be string or number @@ -283,7 +282,7 @@ OutputVector translate_linalg_norm(const NodeContext& context) { if (p_str == "fro") { result = frobenius_norm(context, x, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); diff --git a/src/frontends/pytorch/src/op/pad.cpp b/src/frontends/pytorch/src/op/pad.cpp index 390277edcf5796..4f6e186599544a 100644 --- a/src/frontends/pytorch/src/op/pad.cpp +++ b/src/frontends/pytorch/src/op/pad.cpp @@ -91,10 +91,10 @@ OutputVector translate_pad_common(const NodeContext& context, {"replicate", PadMode::EDGE}, }; auto ov_mode = pt_to_ov_pad.find(mode); - FRONT_END_OP_CONVERSION_CHECK(ov_mode != pt_to_ov_pad.end(), - "aten::pad conversion doesn't support [ ", - mode, - " ] padding mode"); + PYTORCH_OP_CONVERSION_CHECK(ov_mode != pt_to_ov_pad.end(), + "aten::pad conversion doesn't support [ ", + mode, + " ] padding mode"); return {context.mark_node(std::make_shared(data, pads_begins, pads_ends, pad_value_, ov_mode->second))}; } } // namespace diff --git a/src/frontends/pytorch/src/op/pythonop.cpp b/src/frontends/pytorch/src/op/pythonop.cpp index ccaac4a4909004..6040e6bbc97bf9 100644 --- a/src/frontends/pytorch/src/op/pythonop.cpp +++ b/src/frontends/pytorch/src/op/pythonop.cpp @@ -13,16 +13,15 @@ namespace op { OutputVector translate_pythonop(const NodeContext& context) { auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, - "PythonOp must have 1 subgraph to be able to translate it to OV."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, + "PythonOp must have 1 subgraph to be able to translate it to OV."); auto body = context.convert_subgraph(0); auto session = context.get_session(); std::map inputs_map; for (const auto& param : body->get_parameters()) { auto tensor_idx = session->decode_tensor_name(param->output(0)); - 
FRONT_END_OP_CONVERSION_CHECK(!inputs_map.count(tensor_idx), - "Multiple nodes with the same id are not allowed."); + PYTORCH_OP_CONVERSION_CHECK(!inputs_map.count(tensor_idx), "Multiple nodes with the same id are not allowed."); inputs_map[tensor_idx] = {param}; } for (const auto& input : inputs_map) { diff --git a/src/frontends/pytorch/src/op/quantized_convnd.cpp b/src/frontends/pytorch/src/op/quantized_convnd.cpp index 37ab867d72a4ad..485ce9f9d71eb8 100644 --- a/src/frontends/pytorch/src/op/quantized_convnd.cpp +++ b/src/frontends/pytorch/src/op/quantized_convnd.cpp @@ -23,16 +23,16 @@ Output translate_quantized_convnd_base(const NodeContext& context) { auto input = context.get_input(0); auto packed_params_node = std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); + PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); - FRONT_END_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), - "Packed params input node does not contain information about op type."); - FRONT_END_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), - "Incorrect packed params input node operator type, expected prim::GetAttr."); + PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), + "Packed params input node does not contain information about op type."); + PYTORCH_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), + "Incorrect packed params input node operator type, expected prim::GetAttr."); auto packed_params = packed_params_node->inputs(); - FRONT_END_OP_CONVERSION_CHECK(packed_params.size() == 6, - "Packed parameters for quantized conv should contain 6 items."); + PYTORCH_OP_CONVERSION_CHECK(packed_params.size() == 6, + "Packed parameters for quantized conv should contain 6 items."); // Packed params: weight, bias, stride, padding, dilation, groups auto weight = packed_params[0].get_source_output(); auto bias = packed_params[1].get_source_output(); diff --git a/src/frontends/pytorch/src/op/quantized_linear.cpp b/src/frontends/pytorch/src/op/quantized_linear.cpp index a69013f3fabb6b..e414d0c6f5a62f 100644 --- a/src/frontends/pytorch/src/op/quantized_linear.cpp +++ b/src/frontends/pytorch/src/op/quantized_linear.cpp @@ -20,16 +20,16 @@ OutputVector translate_quantized_linear(const NodeContext& context) { auto x = context.get_input(0); auto packed_params_node = std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); + PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); - FRONT_END_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), - "Packed params input node does not contain information about op type."); - FRONT_END_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), - "Incorrect packed params input node operator type, expected prim::GetAttr."); + PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), + "Packed params input node does not contain information about op type."); + 
PYTORCH_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), + "Incorrect packed params input node operator type, expected prim::GetAttr."); auto packed_params = packed_params_node->inputs(); - FRONT_END_OP_CONVERSION_CHECK(packed_params.size() == 2, - "Packed parameters for quantized linear should contain 2 items."); + PYTORCH_OP_CONVERSION_CHECK(packed_params.size() == 2, + "Packed parameters for quantized linear should contain 2 items."); auto weights = packed_params[0].get_source_output(); auto bias = packed_params[1].get_source_output(); diff --git a/src/frontends/pytorch/src/op/rand.cpp b/src/frontends/pytorch/src/op/rand.cpp index d04b3bbd2780b7..a5a6771d36d581 100644 --- a/src/frontends/pytorch/src/op/rand.cpp +++ b/src/frontends/pytorch/src/op/rand.cpp @@ -56,8 +56,8 @@ OutputVector translate_rand(const NodeContext& context) { auto dtype = element::f32; size_t out_id = 1; if (context.get_input_size() == 3) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); out_id = 2; } // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -75,8 +75,8 @@ OutputVector translate_rand(const NodeContext& context) { Output convert_like_out; size_t dtype_id = 1; if (context.get_input_size() == 6) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::rand conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::rand conversion with generator does not supported"); dtype_id = 2; } if (!context.input_is_none(dtype_id)) { @@ -92,7 +92,7 @@ OutputVector translate_rand(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto res = context.mark_node(std::make_shared(sizes, low, high, dtype)); @@ -130,7 +130,7 @@ OutputVector translate_rand_like(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto res = context.mark_node(std::make_shared(sizes, low, high, dtype)); @@ -150,8 +150,8 @@ OutputVector translate_randn(const NodeContext& context) { auto dtype = element::f32; size_t out_id = 1; if (context.get_input_size() == 3) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); out_id = 2; } // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -165,8 +165,8 @@ OutputVector translate_randn(const NodeContext& context) { } size_t dtype_id = 1; if (context.get_input_size() == 6) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); dtype_id = 2; } // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
@@ -186,7 +186,7 @@ OutputVector translate_randn(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto scale = context.mark_node(v0::Constant::create(dtype, Shape{1}, {1})); @@ -226,7 +226,7 @@ OutputVector translate_randn_like(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto scale = context.mark_node(v0::Constant::create(dtype, Shape{1}, {1})); @@ -256,7 +256,7 @@ OutputVector translate_randint(const NodeContext& context) { convert_like_out = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } low = context.mark_node(std::make_shared(low, dtype)); @@ -331,7 +331,7 @@ OutputVector translate_normal(const NodeContext& context) { convert_like_out = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto res = make_random_normal(context, sizes, dtype, std, mean); @@ -340,9 +340,9 @@ OutputVector translate_normal(const NodeContext& context) { } return res; } else { - FRONT_END_OP_CONVERSION_CHECK(false, - "Unsupported number of inputs to aten::normal operation: ", - context.get_input_size()); + PYTORCH_OP_CONVERSION_CHECK(false, + "Unsupported number of inputs to aten::normal operation: ", + context.get_input_size()); } } diff --git a/src/frontends/pytorch/src/op/scatter.cpp b/src/frontends/pytorch/src/op/scatter.cpp index d60cfd91bf6c90..afbf8c2208d3a2 100644 --- a/src/frontends/pytorch/src/op/scatter.cpp +++ b/src/frontends/pytorch/src/op/scatter.cpp @@ -58,9 +58,9 @@ const v12::ScatterElementsUpdate::Reduction get_reduction_mode(const std::string {"amax", v12::ScatterElementsUpdate::Reduction::MAX}, {"amin", v12::ScatterElementsUpdate::Reduction::MIN}}; - FRONT_END_OP_CONVERSION_CHECK(TORCH_REDUCTION_TO_OV.count(pt_reduce_mode), - "Unknown reduction mode: ", - pt_reduce_mode); + PYTORCH_OP_CONVERSION_CHECK(TORCH_REDUCTION_TO_OV.count(pt_reduce_mode), + "Unknown reduction mode: ", + pt_reduce_mode); auto reduction = TORCH_REDUCTION_TO_OV.at(pt_reduce_mode); return reduction; } diff --git a/src/frontends/pytorch/src/op/slice.cpp b/src/frontends/pytorch/src/op/slice.cpp index 20f51a8e786745..62b65922e455ec 100644 --- a/src/frontends/pytorch/src/op/slice.cpp +++ b/src/frontends/pytorch/src/op/slice.cpp @@ -40,7 +40,7 @@ OutputVector translate_slice_common(const NodeContext& context, const size_t num step_idx = 3; dim = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Slice must have either 4 or 5 inputs."); + PYTORCH_OP_CONVERSION_CHECK(false, "Slice must have either 4 or 5 inputs."); } // TODO: support default start/end with negative step ov::Output start; diff --git a/src/frontends/pytorch/src/op/to.cpp b/src/frontends/pytorch/src/op/to.cpp index d902caeb8487bc..cc563217f5eb03 100644 --- a/src/frontends/pytorch/src/op/to.cpp +++ b/src/frontends/pytorch/src/op/to.cpp @@ -54,7 +54,7 @@ OutputVector translate_to(const NodeContext& context) { return {context.get_input(0)}; } } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unknown aten::to 
format"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unknown aten::to format"); } // We ignore both non_blocking and copy inputs since non_blocking argument is used diff --git a/src/frontends/pytorch/src/op/tuple_index.cpp b/src/frontends/pytorch/src/op/tuple_index.cpp index 320733d701284d..b1f0917168d08b 100644 --- a/src/frontends/pytorch/src/op/tuple_index.cpp +++ b/src/frontends/pytorch/src/op/tuple_index.cpp @@ -22,8 +22,8 @@ OutputVector translate_tuple_index(const NodeContext& context) { if (cast_fw_node(tuple, "prim::TupleConstruct")) { // this case require index to be constant auto index = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(static_cast(index) < tuple->get_input_size(), - "Index of TupleIndex operation is higher then number of tuple elements."); + PYTORCH_OP_CONVERSION_CHECK(static_cast(index) < tuple->get_input_size(), + "Index of TupleIndex operation is higher then number of tuple elements."); return {tuple->get_input_source_output(index)}; } else { // Assume this case is when tuple is represented as tensor diff --git a/src/frontends/pytorch/src/op/upsample.cpp b/src/frontends/pytorch/src/op/upsample.cpp index 1d405cf60acd07..83a1e59d93d4bc 100644 --- a/src/frontends/pytorch/src/op/upsample.cpp +++ b/src/frontends/pytorch/src/op/upsample.cpp @@ -41,7 +41,7 @@ OutputVector base_translate_upsample(const NodeContext& context, } else if (dims == 3) { spatial_axes = {2, 3, 4}; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported number of dimensions in upsample"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported number of dimensions in upsample"); } auto target_axes = std::make_shared(element::i32, Shape{spatial_axes.size()}, spatial_axes); auto scales = @@ -50,7 +50,7 @@ OutputVector base_translate_upsample(const NodeContext& context, context.mark_node(std::make_shared(element::i32, Shape{dims}, std::vector(dims, 1))); Output scales_sizes; if (context.input_is_none(1)) { - FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(scale_id), "Scale or Output size should be provided"); + PYTORCH_OP_CONVERSION_CHECK(!context.input_is_none(scale_id), "Scale or Output size should be provided"); auto spatial_scales = context.get_input(scale_id); if (context.get_input_type(1).is()) { spatial_scales = concat_list_construct(spatial_scales); diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index 3d03706970bc67..3c8bf86d84adda 100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -17,7 +17,7 @@ using namespace ov::op; OutputVector translate_where(const NodeContext& context) { num_inputs_check(context, 1, 3); auto cond = context.get_input(0); - FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); + PYTORCH_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); auto x = context.get_input(1); auto y = context.get_input(2); diff --git a/src/frontends/pytorch/src/translate_session.cpp b/src/frontends/pytorch/src/translate_session.cpp index 1791326f41a57b..47c60d872831d7 100644 --- a/src/frontends/pytorch/src/translate_session.cpp +++ b/src/frontends/pytorch/src/translate_session.cpp @@ -9,6 +9,7 @@ #include "input_model.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/slice.hpp" +#include "openvino/util/common_util.hpp" #include "openvino/util/log.hpp" #include "place.hpp" #include "pt_framework_node.hpp" @@ -273,6 +274,7 @@ OutputVector 
TranslateSession::convert_node(const NodeContext& context) { OPENVINO_DEBUG << "No translator found for: " << context.get_op_type() << "\n"; } catch (std::exception& e) { exception = e.what(); + m_telemetry->send_event("error_info", ov::util::filter_lines_by_prefix(exception, "[PyTorch Frontend]")); } catch (...) { exception = "Unknown exception type."; } diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 3cb5798af9f65c..26973ba4dc8acb 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -20,6 +20,17 @@ class FrameworkNode; namespace frontend { namespace pytorch { +/// \brief Macro to check whether a boolean condition holds. +/// \param COND Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if `COND` evaluates to `false`. +/// \throws ::ov::frontend::OpConversionFailure if `COND` is false. +#ifndef PYTORCH_OP_CONVERSION_CHECK +# define PYTORCH_OP_CONVERSION_CHECK(COND, ...) \ + OPENVINO_ASSERT_HELPER(::ov::frontend::OpConversionFailure, "", (COND), "[PyTorch Frontend] " __VA_ARGS__) +#endif + void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs); Output make_optional_bias(const Output& base_op,
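Note on the new macro and the telemetry filtering (illustrative only): PYTORCH_OP_CONVERSION_CHECK prefixes every failure message with "[PyTorch Frontend] ", and convert_node now forwards to telemetry only the lines of a caught exception that carry that prefix. The sketch below is a minimal, self-contained approximation of that flow; the demo macro, the re-implemented prefix filter, and the sample message are assumptions made for illustration and are not the actual OpenVINO implementations (the real macro is variadic and throws ov::frontend::OpConversionFailure via OPENVINO_ASSERT_HELPER).

// Standalone C++ sketch: how a failed check yields a prefixed message and how
// prefix filtering keeps only frontend-originated lines for the telemetry event.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

// Demo stand-in for PYTORCH_OP_CONVERSION_CHECK (single message argument only).
#define DEMO_PYTORCH_OP_CONVERSION_CHECK(COND, MSG)                               \
    do {                                                                          \
        if (!(COND))                                                              \
            throw std::runtime_error(std::string("[PyTorch Frontend] ") + (MSG)); \
    } while (0)

// Assumed behavior of ov::util::filter_lines_by_prefix: keep only the lines
// that start with the given prefix, so unrelated exception text is not sent.
std::string demo_filter_lines_by_prefix(const std::string& text, const std::string& prefix) {
    std::istringstream in(text);
    std::ostringstream out;
    std::string line;
    while (std::getline(in, line)) {
        if (line.rfind(prefix, 0) == 0) {  // line starts with the prefix
            out << line << '\n';
        }
    }
    return out.str();
}

int main() {
    try {
        // A translator-style check that fails, like the converted checks above.
        DEMO_PYTORCH_OP_CONVERSION_CHECK(false, "Only sum mode supported for aten::embedding_bag translation");
    } catch (const std::exception& e) {
        // Mirrors convert_node: catch the exception, then forward only the
        // prefixed lines (printed here instead of m_telemetry->send_event).
        std::cout << demo_filter_lines_by_prefix(e.what(), "[PyTorch Frontend]");
    }
    return 0;
}

A practical consequence of the prefix, under the filtering behavior assumed above, is that exceptions raised outside the renamed checks (for example from lower-level OpenVINO code that does not emit the "[PyTorch Frontend]" prefix) contribute no lines to the telemetry payload.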