Skip to content

Commit

Permalink
xfail for FX backend conformance test
Browse files Browse the repository at this point in the history
  • Loading branch information
anzr299 committed Feb 14, 2025
1 parent 49d6251 commit 960e12f
Show file tree
Hide file tree
Showing 3 changed files with 10 additions and 5 deletions.
4 changes: 4 additions & 0 deletions tests/post_training/data/ptq_reference_data.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ torchvision/resnet18_backend_CUDA_TORCH:
metric_value: 0.69152
torchvision/resnet18_backend_FX_TORCH:
metric_value: 0.6946
xfail_reason: "Issue-CVS-162009"
torchvision/resnet18_backend_CUDA_FX_TORCH:
metric_value: 0.6946
torchvision/mobilenet_v3_small_BC_backend_FP32:
Expand All @@ -49,6 +50,7 @@ torchvision/mobilenet_v3_small_BC_backend_ONNX:
metric_value: 0.6679
torchvision/mobilenet_v3_small_BC_backend_FX_TORCH:
metric_value: 0.6679
xfail_reason: "Issue-CVS-162009"
torchvision/mobilenet_v3_small_BC_backend_CUDA_FX_TORCH:
metric_value: 0.6664
torchvision/vit_b_16_backend_FP32:
Expand All @@ -57,6 +59,7 @@ torchvision/vit_b_16_backend_OV:
metric_value: 0.80948
torchvision/vit_b_16_backend_FX_TORCH:
metric_value: 0.80922
xfail_reason: "Issue-CVS-162009"
torchvision/vit_b_16_backend_CUDA_FX_TORCH:
metric_value: 0.80922
torchvision/swin_v2_s_backend_FP32:
Expand All @@ -65,6 +68,7 @@ torchvision/swin_v2_s_backend_OV:
metric_value: 0.83638
torchvision/swin_v2_s_backend_FX_TORCH:
metric_value: 0.8360
xfail_reason: "Issue-CVS-162009"
torchvision/swin_v2_s_backend_CUDA_FX_TORCH:
metric_value: 0.8360
timm/crossvit_9_240_backend_CUDA_TORCH:
Expand Down
5 changes: 3 additions & 2 deletions tests/post_training/pipelines/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,7 @@ def save_compressed_model(self) -> None:
self.path_compressed_ir = self.output_model_dir / "model.xml"
ov.serialize(ov_model, self.path_compressed_ir)
elif self.backend in FX_BACKENDS:
exported_model = torch.export.export_for_inference(self.compressed_model.cpu(), (self.dummy_tensor.cpu(),))
exported_model = torch.export.export(self.compressed_model.cpu(), (self.dummy_tensor.cpu(),))
ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor.cpu(), input=self.input_size)
ov_model.reshape(self.input_size)
self.path_compressed_ir = self.output_model_dir / "model.xml"
Expand All @@ -468,7 +468,8 @@ def get_num_compressed(self) -> None:
"""
Get number of the FakeQuantize nodes in the compressed IR.
"""

if self.backend in FX_BACKENDS:
return
ie = ov.Core()
model = ie.read_model(model=self.path_compressed_ir)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def _torch_export_for_training(model: torch.nn.Module, args: Tuple[Any, ...]) ->


def _torch_export(model: torch.nn.Module, args: Tuple[Any, ...]) -> torch.fx.GraphModule:
return torch.export.export_for_inference(model, args).module()
return torch.export.export(model, args).module()


@dataclass
Expand Down Expand Up @@ -118,14 +118,14 @@ def _dump_model_fp32(self) -> None:
if self.backend in PT_BACKENDS:
with disable_patching():
ov_model = ov.convert_model(
torch.export.export_for_inference(self.model, args=(self.dummy_tensor,)),
self.model,
example_input=self.dummy_tensor,
input=self.input_size,
)
ov.serialize(ov_model, self.fp32_model_dir / "model_fp32.xml")

if self.backend in FX_BACKENDS:
exported_model = torch.export.export_for_inference(self.model.cpu(), (self.dummy_tensor.cpu(),))
exported_model = torch.export.export(self.model.cpu(), (self.dummy_tensor.cpu(),))
ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor, input=self.input_size)
ov.serialize(ov_model, self.fp32_model_dir / "fx_model_fp32.xml")

Expand Down

0 comments on commit 960e12f

Please sign in to comment.