Bump paddlepaddle from 2.5.0 to 2.6.0 in /tests #11

Closed · wants to merge 2 commits
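Every hunk in this diff makes the same substitution: the generic FRONT_END_OP_CONVERSION_CHECK assertion used by the PyTorch frontend op translators is replaced with the PyTorch-specific PYTORCH_OP_CONVERSION_CHECK, with conditions and messages left unchanged. The sketch below only illustrates how such a check-macro pair can be built; the exception type, helper function, and message prefix are assumptions for illustration, not the actual OpenVINO definitions.

```cpp
// Minimal sketch of a conversion-check macro, assuming a dedicated exception
// type for failed op conversions. Not the real OpenVINO implementation.
#include <sstream>
#include <stdexcept>
#include <utility>

struct OpConversionFailure : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// Concatenates an arbitrary number of message fragments into one string and throws.
template <typename... Args>
[[noreturn]] void throw_op_conversion_failure(Args&&... args) {
    std::ostringstream oss;
    (oss << ... << std::forward<Args>(args));
    throw OpConversionFailure(oss.str());
}

// Fails the current op translation with a PyTorch-frontend-tagged message
// when the condition does not hold.
#define PYTORCH_OP_CONVERSION_CHECK(COND, ...)                                \
    do {                                                                      \
        if (!(COND))                                                          \
            throw_op_conversion_failure("[PyTorch frontend] ", __VA_ARGS__);  \
    } while (0)
```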
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/add.cpp
@@ -26,7 +26,7 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) {
if (dtype0.is<type::List>() && dtype1.is<type::List>()) {
// aten::add.t(t[] a, t[] b) -> t[]
// Case when two lists gets concatenated
FRONT_END_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert");
PYTORCH_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert");
}
if (inplace) {
if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type())
8 changes: 4 additions & 4 deletions src/frontends/pytorch/src/op/arange.cpp
@@ -60,7 +60,7 @@ OutputVector translate_arange(const NodeContext& context) {
dtype_port = 3;
dtype_applied = true;
} else {
FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type());
PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type());
}
if (dtype_port >= 0 && !context.input_is_none(dtype_port)) {
if (std::dynamic_pointer_cast<v0::Constant>(
@@ -72,7 +72,7 @@ OutputVector translate_arange(const NodeContext& context) {
out_tensor = fw_node->input_value(0);
dtype_applied = false;
} else {
FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
}
}
auto range = context.mark_node(std::make_shared<v4::Range>(start, end, step, dtype));
@@ -130,7 +130,7 @@ OutputVector translate_arange_fx(const NodeContext& context) {
dtype_port = 3;
dtype_applied = true;
} else {
FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type());
PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type());
}
if (dtype_port >= 0 && !context.input_is_none(dtype_port)) {
if (std::dynamic_pointer_cast<v0::Constant>(
out_tensor = fw_node->input_value(0);
dtype_applied = false;
} else {
FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
}
}
auto r_end = context.mark_node(std::make_shared<v0::Convert>(end, dtype));
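Both arange translators pass a variable number of message fragments to the check; they are concatenated only when the condition fails. A hedged usage sketch follows (the function name and the input-count bound are made up, not taken from this PR):

```cpp
// Illustrative call site; assumes PYTORCH_OP_CONVERSION_CHECK as sketched earlier
// or as provided by the frontend's own headers.
#include <cstdint>
#include <string>

void check_supported_input_count(int64_t num_inputs, const std::string& op_type) {
    // On failure the fragments are joined into a message such as
    // "Not expected number of inputs for aten::arange".
    PYTORCH_OP_CONVERSION_CHECK(num_inputs <= 7, "Not expected number of inputs for ", op_type);
}
```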
8 changes: 4 additions & 4 deletions src/frontends/pytorch/src/op/as_strided.cpp
@@ -32,8 +32,8 @@ OutputVector translate_as_strided(const NodeContext& context) {
auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1}));
auto input_strides = decoder->get_input_strides(0);
-FRONT_END_OP_CONVERSION_CHECK(input_strides.size() != 0,
-"aten::as_strided: Couldn't retrive input stride information from torchscript.");
+PYTORCH_OP_CONVERSION_CHECK(input_strides.size() != 0,
+"aten::as_strided: Couldn't retrive input stride information from torchscript.");

std::vector<size_t> idxs(input_strides.size());
iota(idxs.begin(), idxs.end(), 0);
@@ -77,8 +77,8 @@ OutputVector translate_as_strided(const NodeContext& context) {
if (!context.input_is_none(3)) {
offset = context.get_input(3);
}
-FRONT_END_OP_CONVERSION_CHECK(sizes.size() == strides.size(),
-"aten::as_strided: Vector for strides and sizes need to have equal length.");
+PYTORCH_OP_CONVERSION_CHECK(sizes.size() == strides.size(),
+"aten::as_strided: Vector for strides and sizes need to have equal length.");
auto strides_size = strides.size() - 1;
auto i = 0;
auto strides_length_const = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {strides.size()}));
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/as_tensor.cpp
@@ -55,7 +55,7 @@ OutputVector translate_as_tensor(const NodeContext& context) {
return {context.mark_node(std::make_shared<v0::Concat>(OutputVector(list_elems.begin(), list_elems.end()), 0))};
} else {
// Input is already a tensor
-FRONT_END_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor.");
+PYTORCH_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor.");
return {list_elems[0]};
}
};
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/avg_poolnd.cpp
@@ -45,8 +45,8 @@ OutputVector translate_avg_poolnd(const NodeContext& context) {
if (!(context.input_is_none(5))) {
count_include_pad = context.const_input<bool>(5);
}
-FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(6),
-"Translation for aten::avg_pool2d do not support divisor_override input.");
+PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(6),
+"Translation for aten::avg_pool2d do not support divisor_override input.");
// Although ov::AvgPool provides exclude_pad=false,
// The corner case of Average Pooling with ceil_mode on
// PyTorch allows sliding window go off bound, which leads to this accommodation.
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/batch_norm.cpp
@@ -139,8 +139,8 @@ OutputVector translate_batch_norm_legit_no_stats_fx(const NodeContext& context)
bias = context.get_input(2);
}
auto training = context.const_input<bool>(3);
-FRONT_END_OP_CONVERSION_CHECK(training,
-"aten._native_batch_norm_legit.no_stats can only be used when training=True.");
+PYTORCH_OP_CONVERSION_CHECK(training,
+"aten._native_batch_norm_legit.no_stats can only be used when training=True.");
// index 4 momentum is used during training only
auto eps = context.const_input<float>(5);
auto output = make_batch_norm(context, context.get_input(0), weight, bias, {}, {}, eps);
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/cat.cpp
@@ -35,7 +35,7 @@ OutputVector translate_cat_common(const NodeContext& context,
return {context.mark_node(fw_node)};
}
auto first_node = list_elems.front().get_node_shared_ptr();
-FRONT_END_OP_CONVERSION_CHECK(
+PYTORCH_OP_CONVERSION_CHECK(
list_elems.size() > 1 || !ov::as_type_ptr<v0::Parameter>(first_node),
"<aten/quantized>::cat is located inside body while inputs are located outside of the body. "
"This case is not supported.");
@@ -86,7 +86,7 @@ OutputVector translate_quantized_cat(const NodeContext& context) {
num_inputs_check(context, 4, 4);
const auto&& list_elems = get_list_as_outputs(context.get_input(0));
auto axis = context.const_input<int64_t>(1);
-FRONT_END_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation.");
+PYTORCH_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation.");
return {quantize(context,
translate_cat_common(context, list_elems, axis, false)[0],
context.get_input(2),
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/conv_transposend.cpp
@@ -24,7 +24,7 @@ OutputVector translate_conv_transposend(const NodeContext& context) {
auto pad_type = ov::op::PadType::EXPLICIT;
auto dilations = context.const_input<Strides>(7);
auto groups = context.const_input<int64_t>(6);
-FRONT_END_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1");
+PYTORCH_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1");

std::shared_ptr<ov::Node> conv;
if (groups == 1) {
8 changes: 4 additions & 4 deletions src/frontends/pytorch/src/op/elu.cpp
@@ -18,10 +18,10 @@ OutputVector translate_elu(const NodeContext& context) {
auto x = context.get_input(0);
auto alpha = context.const_input<float>(1);
// TODO: Figure out what scale and input_scale do
-FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<int64_t>(2) == 1,
-"Unexpected value of scale input for elu operation");
-FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input<int64_t>(3) == 1,
-"Unexpected value of input_scale input for elu operation");
+PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<int64_t>(2) == 1,
+"Unexpected value of scale input for elu operation");
+PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input<int64_t>(3) == 1,
+"Unexpected value of input_scale input for elu operation");
return {context.mark_node(std::make_shared<ov::op::v0::Elu>(x, alpha))};
};

4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op/embedding_bag.cpp
@@ -21,7 +21,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) {
num_inputs_check(context, 9, 9);
// we have only EmbeddingBagSum case support, check it before translation
auto mode = context.const_input<int64_t>(4);
-FRONT_END_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation");
+PYTORCH_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation");
auto weight = context.get_input(0);
auto indices = context.get_input(1);
indices = context.mark_node(std::make_shared<ov::op::v0::Convert>(indices, element::i32));
@@ -44,7 +44,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) {
auto offsets = context.get_input(2);
offsets = context.mark_node(std::make_shared<ov::op::v0::Convert>(offsets, element::i32));
auto include_last_offset = context.const_input<bool>(7);
-FRONT_END_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is not supported");
+PYTORCH_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is not supported");
// no per_sample_wights
if (context.input_is_none(6)) {
result = context.mark_node(std::make_shared<ov::op::v3::EmbeddingBagOffsetsSum>(weight, indices, offsets));
8 changes: 4 additions & 4 deletions src/frontends/pytorch/src/op/expand.cpp
@@ -28,8 +28,8 @@ OutputVector translate_expand(const NodeContext& context) {
auto x = context.get_input(0);
auto sizes = context.get_input(1);
// TODO: figure out what implicit means
-FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<bool>(2) == false,
-"Unexpected value of implicit for expand operation");
+PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<bool>(2) == false,
+"Unexpected value of implicit for expand operation");
return base_expand(context, x, sizes);
};

}
auto sizes = context.get_input(1);
// TODO: figure out what implicit means
-FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<bool>(2) == false,
-"Unexpected value of implicit for expand operation");
+PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input<bool>(2) == false,
+"Unexpected value of implicit for expand operation");
return base_expand(context, x, sizes);
};

2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/eye.cpp
@@ -36,7 +36,7 @@ OutputVector translate_eye(const NodeContext& context) {
y = context.mark_node(std::make_shared<v0::Convert>(y, element::i32));
dtype_id = 2;
} else {
FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye");
PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye");
}
if (!context.input_is_none(dtype_id)) {
dtype = convert_dtype(context.const_input<int64_t>(dtype_id));
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/full.cpp
@@ -264,7 +264,7 @@ OutputVector translate_fill_diagonal(const NodeContext& context) {
auto const_zero_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
auto const_neg_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1}));
if (input_rank.is_dynamic() || input_rank.get_length() < 2) {
FRONT_END_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 ");
PYTORCH_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 ");
}
auto flatten_input = context.mark_node(std::make_shared<v1::Reshape>(input_tensor, const_neg_one, false));
auto wrap = context.const_input<bool>(2);
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/gelu.cpp
@@ -21,7 +21,7 @@ OutputVector translate_gelu_common(const NodeContext& context, const std::string
if (approximate == "tanh") {
return {context.mark_node(std::make_shared<ov::op::v7::Gelu>(x, ov::op::GeluApproximationMode::TANH))};
}
FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported approximate for Gelu: ", approximate);
PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported approximate for Gelu: ", approximate);
};
} // namespace

6 changes: 3 additions & 3 deletions src/frontends/pytorch/src/op/get_attr.cpp
@@ -13,9 +13,9 @@ namespace op {

OutputVector translate_get_attr(const NodeContext& context) {
auto res = context.get_decoder()->try_decode_get_attr();
-FRONT_END_OP_CONVERSION_CHECK(res.size() > 0,
-"Failed to obtain data from GetAttr with output tensor name: ",
-context.get_decoder()->get_output_debug_name(0));
+PYTORCH_OP_CONVERSION_CHECK(res.size() > 0,
+"Failed to obtain data from GetAttr with output tensor name: ",
+context.get_decoder()->get_output_debug_name(0));
if (res.size() == 1) {
auto node = res[0].get_node();
if (node->get_friendly_name() != node->get_name()) {
26 changes: 13 additions & 13 deletions src/frontends/pytorch/src/op/getitem.cpp
@@ -20,29 +20,29 @@ OutputVector translate_getitem(const NodeContext& context) {
num_inputs_check(context, 2, 2);
auto input = context.get_input(0);
const auto idx_type = context.get_input_type(1);
-FRONT_END_OP_CONVERSION_CHECK(!idx_type.is<type::Str>(),
-"String index in aten::__getitem__ means dict input, this is not supported.");
+PYTORCH_OP_CONVERSION_CHECK(!idx_type.is<type::Str>(),
+"String index in aten::__getitem__ means dict input, this is not supported.");
if (ov::as_type_ptr<ov::op::util::FrameworkNode>(input.get_node_shared_ptr())) {
-FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"),
-"special case for aten::__getitem__");
-FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"),
-"special case for aten::__getitem__");
+PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"),
+"special case for aten::__getitem__");
+PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"),
+"special case for aten::__getitem__");
const auto&& list_elems = get_list_as_outputs(input);
auto getitem_idx = context.const_input<int64_t>(1);
if (getitem_idx < 0) {
getitem_idx += list_elems.size();
}
-FRONT_END_OP_CONVERSION_CHECK(getitem_idx < static_cast<int64_t>(list_elems.size()),
-"Index: ",
-getitem_idx,
-" is out of bounds of input list of len: ",
-list_elems.size());
+PYTORCH_OP_CONVERSION_CHECK(getitem_idx < static_cast<int64_t>(list_elems.size()),
+"Index: ",
+getitem_idx,
+" is out of bounds of input list of len: ",
+list_elems.size());
return {list_elems.at(getitem_idx)};
}
if (ov::as_type_ptr<v0::Parameter>(input.get_node_shared_ptr())) {
const auto& outside_input_node = context.get_input_from_visible_context(0).get_node_shared_ptr();
-FRONT_END_OP_CONVERSION_CHECK(!ov::as_type_ptr<v5::Loop>(outside_input_node),
-"Unsupported case: aten::__getitem__ is inside the body, and input is Loop.");
+PYTORCH_OP_CONVERSION_CHECK(!ov::as_type_ptr<v5::Loop>(outside_input_node),
+"Unsupported case: aten::__getitem__ is inside the body, and input is Loop.");
}
auto getitem_idx = context.get_input(1);
auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
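The __getitem__ hunks above keep the surrounding logic intact: a negative index is first wrapped by the list length and then bounds-checked with the new macro. A standalone sketch of that idiom follows (the helper name and the extra lower-bound guard are additions for illustration, not part of the PR):

```cpp
// Sketch of the wrap-then-check indexing idiom used in translate_getitem.
// Assumes PYTORCH_OP_CONVERSION_CHECK as sketched earlier or from the frontend headers.
#include <cstdint>
#include <vector>

template <typename T>
const T& list_at(const std::vector<T>& list_elems, int64_t getitem_idx) {
    if (getitem_idx < 0) {
        getitem_idx += static_cast<int64_t>(list_elems.size());  // Python-style negative indexing
    }
    PYTORCH_OP_CONVERSION_CHECK(getitem_idx >= 0 && getitem_idx < static_cast<int64_t>(list_elems.size()),
                                "Index: ", getitem_idx,
                                " is out of bounds of input list of len: ", list_elems.size());
    return list_elems.at(static_cast<size_t>(getitem_idx));
}
```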
8 changes: 4 additions & 4 deletions src/frontends/pytorch/src/op/grid_sampler.cpp
@@ -28,12 +28,12 @@ OutputVector translate_grid_sampler(const NodeContext& context) {
{1, v9::GridSample::PaddingMode::BORDER},
{2, v9::GridSample::PaddingMode::REFLECTION}};
auto mode = context.const_input<int64_t>(2);
-FRONT_END_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode);
+PYTORCH_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode);
attrs.mode = grid_sample_mode_map.at(mode);
auto padding_mode = context.const_input<int64_t>(3);
-FRONT_END_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode),
-"Unknown padding mode: ",
-padding_mode);
+PYTORCH_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode),
+"Unknown padding mode: ",
+padding_mode);
attrs.padding_mode = grid_sample_padding_mode_map.at(padding_mode);
bool align_corners = false;
if (!context.input_is_none(4)) {
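The grid_sampler hunks use the check to reject integer modes that are missing from the attribute maps before calling .at(). A generic lookup-then-check sketch is given below; the enum and map contents are stand-ins, not OpenVINO's actual types:

```cpp
// Sketch of the map-lookup guard used in translate_grid_sampler.
// Assumes PYTORCH_OP_CONVERSION_CHECK as sketched earlier or from the frontend headers.
#include <cstdint>
#include <unordered_map>

enum class PaddingMode { ZEROS, BORDER, REFLECTION };  // illustrative enum

PaddingMode to_padding_mode(int64_t padding_mode) {
    static const std::unordered_map<int64_t, PaddingMode> grid_sample_padding_mode_map{
        {0, PaddingMode::ZEROS},
        {1, PaddingMode::BORDER},
        {2, PaddingMode::REFLECTION}};
    // Reject unknown values before .at(), which would otherwise throw a bare std::out_of_range.
    PYTORCH_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode),
                                "Unknown padding mode: ", padding_mode);
    return grid_sample_padding_mode_map.at(padding_mode);
}
```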