diff --git a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
index 7dc40e310330cf..1a49a989c11df2 100644
--- a/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
+++ b/tests/layer_tests/ovc_python_api_tests/test_pytorch.py
@@ -1181,6 +1181,19 @@ def forward(self, a, b):
         )}
 
 
+def create_pytorch_module_with_output(tmp_dir):
+    class PTModel(torch.nn.Module):
+        def forward(self, a, b):
+            return a + b
+
+    net = PTModel()
+    return net, None, {
+        "example_input": (
+            torch.tensor([5, 6], dtype=torch.float32),
+            torch.tensor([5, 6], dtype=torch.float32),
+        ), "output": "some_name"}
+
+
 class TestMoConvertPyTorch(CommonMOConvertTest):
     test_data = [
         'create_pytorch_nn_module_case1',
@@ -1255,6 +1268,23 @@ def test_mo_import_from_memory(self, create_model, ie_device, precision, ir_vers
         self._test_by_ref_graph(temp_dir, test_params,
                                 graph_ref, compare_tensor_names=False)
 
+    @pytest.mark.parametrize("create_model,exception", [
+        ('create_pytorch_module_with_output', AssertionError)
+    ])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_mo_import_from_memory_negative(self, create_model, exception,
+                                            ie_device, precision, ir_version,
+                                            temp_dir, use_legacy_frontend):
+        fw_model, graph_ref, mo_params = eval(create_model)(temp_dir)
+
+        test_params = {'input_model': fw_model}
+        if mo_params is not None:
+            test_params.update(mo_params)
+        with pytest.raises(exception):
+            self._test_by_ref_graph(temp_dir, test_params,
+                                    graph_ref, compare_tensor_names=False)
+
 
 def create_pt_model_with_custom_op():
     #
diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py b/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py
index 12722b5d771b75..4a297707a0e537 100644
--- a/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py
+++ b/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py
@@ -126,16 +126,13 @@ def merge_inputs(inputs, to_set_list):
                 res.append(p)
         return res
     iplaces = merge_inputs(model_inputs, iplaces)
-    # Currently this only work to reorder inputs/outputs
+    oplaces = []
+    # Currently this only works to reorder inputs
    to_override_all_inputs = check_places_are_same(model_inputs, [{"node": p} for p in iplaces])
     to_override_all_outputs = False
     if argv.output:
-        oplaces = []
         _outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
-        for out_desc in _outputs:
-            oplaces.append(out_desc["name"])
-        model_outputs = input_model.get_outputs()
-        to_override_all_outputs = check_places_are_same(model_outputs, [{"node": p} for p in oplaces])
+        assert len(_outputs) == 0, "`output` argument is not supported for PyTorch"
     if to_override_all_inputs and to_override_all_outputs:
         input_model.extract_subgraph(iplaces, oplaces)
     elif to_override_all_inputs:
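
For context, a minimal usage sketch of the behavior this patch enforces (not part of the diff itself): it assumes the public openvino.convert_model API, which routes through moc_pipeline, and shows the AssertionError path that the new negative test expects when `output` is passed together with a PyTorch module.

import torch
import openvino as ov

class PTModel(torch.nn.Module):
    def forward(self, a, b):
        return a + b

example = (torch.tensor([5, 6], dtype=torch.float32),
           torch.tensor([5, 6], dtype=torch.float32))
try:
    # After this patch, `output` is expected to be rejected for PyTorch models.
    ov.convert_model(PTModel(), example_input=example, output="some_name")
except AssertionError as err:
    print(err)  # "`output` argument is not supported for PyTorch"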