Remove quantize_io from compile_spec (#7647)
quantize_io was only used in arm_partitioner
and is not needed there anymore when
running the delegate in the graph.

Signed-off-by: Erik Lundell <[email protected]>
Erik-Lundell authored Jan 16, 2025
1 parent 0dba025 commit d1b33cb
Showing 5 changed files with 11 additions and 55 deletions.
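At a glance, the user-facing change: the .set_quantize_io(...) step disappears from the compile-spec builder chain. A minimal before/after sketch, assuming the Ethos-U55 target string and flags used by the test helpers changed below; the ethosu_compile_spec arguments shown are abridged, not the full signature:

from executorch.backends.arm.arm_backend import ArmCompileSpecBuilder

# Before this commit: the flag was encoded into the compile spec.
compile_spec = (
    ArmCompileSpecBuilder()
    .ethosu_compile_spec(
        "ethos-u55-128",
        memory_mode="Shared_Sram",
        extra_flags="--output-format=raw",
    )
    .set_quantize_io(True)  # removed by this commit
    .build()
)

# After this commit: the step is simply dropped.
compile_spec = (
    ArmCompileSpecBuilder()
    .ethosu_compile_spec(
        "ethos-u55-128",
        memory_mode="Shared_Sram",
        extra_flags="--output-format=raw",
    )
    .build()
)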
21 changes: 1 addition & 20 deletions backends/arm/arm_backend.py
@@ -49,8 +49,7 @@ def __init__(self):
         self.compiler_flags = []
         self.output_format = None
         self.path_for_intermediates = None
-        self.quantize_io = False
-        self.tosa_spec = None
+        self.tosa_version = None
         self.input_order = None

     def ethosu_compile_spec(
@@ -123,14 +122,6 @@ def dump_intermediate_artifacts_to(
         self.path_for_intermediates = output_path
         return self

-    def set_quantize_io(self, quantize_io: bool = False) -> "ArmCompileSpecBuilder":
-        """
-        Quantization of inputs and dequantization of outputs for cases where
-        whole graph is quantized and method signature is not of quantized type.
-        """
-        self.quantize_io = quantize_io
-        return self
-
     def set_input_order(
         self, input_order: Optional[str] = None
     ) -> "ArmCompileSpecBuilder":
@@ -170,9 +161,6 @@ def build(self) -> List[CompileSpec]:
                 )
             )

-        if self.quantize_io:
-            self.compile_spec.append(CompileSpec("quantize_io", "True".encode()))
-
         return self.compile_spec


@@ -183,13 +171,6 @@ def is_tosa(compile_spec: List[CompileSpec]) -> bool:
     return False


-def is_quantize_io(compile_specs: List[CompileSpec]) -> bool:
-    for spec in compile_specs:
-        if spec.key == "quantize_io" and spec.value.decode() == "True":
-            return True
-    return False
-
-
 def get_tosa_version(compile_spec: List[CompileSpec]) -> TosaSpecification:
     for spec in compile_spec:
         if spec.key == "tosa_version":
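The deleted is_quantize_io helper shows the CompileSpec convention this backend uses: each entry is a string key paired with a bytes value, so boolean flags travel as encoded strings. A small round-trip sketch of that convention; the "debug_flag" key is hypothetical, chosen only to illustrate the encode/decode pattern the removed helper relied on:

from executorch.exir.backend.compile_spec_schema import CompileSpec

# Flags are stored as (key, bytes) pairs; "debug_flag" is an illustrative key.
specs = [CompileSpec("debug_flag", "True".encode())]

def has_flag(compile_specs, key):
    # Same shape as the removed is_quantize_io: scan keys, decode the value.
    return any(s.key == key and s.value.decode() == "True" for s in compile_specs)

assert has_flag(specs, "debug_flag")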
4 changes: 0 additions & 4 deletions backends/arm/arm_partitioner.py
@@ -12,7 +12,6 @@
 import torch
 from executorch.backends.arm.arm_backend import (
     ArmBackend,
-    is_quantize_io,
 )  # usort: skip
 from executorch.backends.arm.operator_support.tosa_supported_operators import (
     TOSASupportedOperators,
@@ -89,9 +88,6 @@ def is_partitioned(node: torch.fx.Node, tag=tag) -> bool:
                 node.meta["delegation_tag"] = tag
             partition_tags[tag] = self.delegation_spec

-            if not is_quantize_io(self.delegation_spec.compile_specs):
-                continue
-
             # De-tag outmost q-nodes upwards and dq-nodes downwards.
             # De-tag if at least one input/ output is not part of partition.
             for node in partition.nodes:
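With the early `continue` gone, the q/dq boundary de-tagging in this hunk now runs for every partition rather than only when quantize_io was set. A toy sketch of the de-tagging idea, using plain dicts in place of torch.fx nodes; the node shape, op names, and tagging scheme are illustrative assumptions, not the partitioner's real types:

# Toy nodes: an op name, input nodes, and a delegation tag that may be cleared.
def make_node(op, inputs=()):
    return {"op": op, "inputs": list(inputs), "tag": "tag0"}

x = {"op": "placeholder", "inputs": [], "tag": None}  # graph input, untagged
q = make_node("quantize", [x])
conv = make_node("conv2d", [q])

def detag_boundary_quantize(nodes):
    # Mirrors the comment above: de-tag an outermost q-node when at least one
    # of its inputs is not part of the partition, so quantization of graph
    # inputs stays outside the delegate.
    for n in nodes:
        if n["op"] == "quantize" and any(i["tag"] is None for i in n["inputs"]):
            n["tag"] = None

detag_boundary_quantize([q, conv])
assert q["tag"] is None and conv["tag"] == "tag0"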
9 changes: 0 additions & 9 deletions backends/arm/test/common.py
@@ -78,44 +78,38 @@ def get_tosa_compile_spec_unbuilt(
         ArmCompileSpecBuilder()
         .tosa_compile_spec(tosa_spec)
         .dump_intermediate_artifacts_to(custom_path)
-        .set_quantize_io(True)
     )

     return compile_spec_builder


 def get_u55_compile_spec(
-    quantize_io=True,
     custom_path=None,
     reorder_inputs=None,
 ) -> list[CompileSpec]:
     """
     Default compile spec for Ethos-U55 tests.
     """
     return get_u55_compile_spec_unbuilt(
-        quantize_io=quantize_io,
         custom_path=custom_path,
         reorder_inputs=reorder_inputs,
     ).build()


 def get_u85_compile_spec(
-    quantize_io=True,
     custom_path=None,
     reorder_inputs=None,
 ) -> list[CompileSpec]:
     """
     Default compile spec for Ethos-U85 tests.
     """
     return get_u85_compile_spec_unbuilt(
-        quantize_io=quantize_io,
         custom_path=custom_path,
         reorder_inputs=reorder_inputs,
     ).build()


 def get_u55_compile_spec_unbuilt(
-    quantize_io=True,
     custom_path=None,
     reorder_inputs=None,
 ) -> ArmCompileSpecBuilder:
@@ -133,15 +127,13 @@ def get_u55_compile_spec_unbuilt(
             memory_mode="Shared_Sram",
             extra_flags="--debug-force-regor --output-format=raw",
         )
-        .set_quantize_io(quantize_io)
         .dump_intermediate_artifacts_to(artifact_path)
         .set_input_order(reorder_inputs)
     )
     return compile_spec


 def get_u85_compile_spec_unbuilt(
-    quantize_io=True,
     custom_path=None,
     reorder_inputs=None,
 ) -> list[CompileSpec]:
@@ -157,7 +149,6 @@
             memory_mode="Shared_Sram",
             extra_flags="--output-format=raw",
         )
-        .set_quantize_io(quantize_io)
         .dump_intermediate_artifacts_to(artifact_path)
         .set_input_order(reorder_inputs)
     )
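Call sites shrink accordingly: the test helpers now take only the remaining keyword arguments. A short sketch of the updated usage; the artifact path is illustrative:

from executorch.backends.arm.test import common

# quantize_io= is gone from both helpers; the remaining kwargs are unchanged.
u55_specs = common.get_u55_compile_spec(custom_path="/tmp/arm_intermediates")
u85_specs = common.get_u85_compile_spec(reorder_inputs=None)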
30 changes: 10 additions & 20 deletions backends/arm/test/ops/test_depthwise_conv.py
@@ -259,58 +259,48 @@ def test_dw_conv_tosa_BI(self, test_name: str, model: torch.nn.Module):

     @parameterized.expand(testsuite_conv2d[:4], skip_on_empty=True)
     @pytest.mark.corstone_fvp
-    def test_dw_conv2d_u55_BI(
-        self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
-    ):
+    def test_dw_conv2d_u55_BI(self, test_name: str, model: torch.nn.Module):
         self._test_dw_conv_ethos_BI_pipeline(
             model,
-            common.get_u55_compile_spec(quantize_io=set_quantize_io),
+            common.get_u55_compile_spec(),
             model.get_inputs(),
         )

     @parameterized.expand(testsuite_conv2d[4:], skip_on_empty=True)
     @pytest.mark.corstone_fvp
     @conftest.expectedFailureOnFVP  # TODO: MLETORCH-516
-    def test_dw_conv2d_u55_BI_xfails(
-        self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = False
-    ):
+    def test_dw_conv2d_u55_BI_xfails(self, test_name: str, model: torch.nn.Module):
         self._test_dw_conv_ethos_BI_pipeline(
             model,
-            common.get_u55_compile_spec(quantize_io=set_quantize_io),
+            common.get_u55_compile_spec(),
             model.get_inputs(),
         )

     @parameterized.expand(testsuite_conv1d, skip_on_empty=True)
     @pytest.mark.corstone_fvp
-    def test_dw_conv1d_u55_BI(
-        self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
-    ):
+    def test_dw_conv1d_u55_BI(self, test_name: str, model: torch.nn.Module):
         self._test_dw_conv_ethos_BI_pipeline(
             model,
-            common.get_u55_compile_spec(quantize_io=set_quantize_io),
+            common.get_u55_compile_spec(),
             model.get_inputs(),
         )

     @parameterized.expand(testsuite_conv1d + testsuite_conv2d_u85)
     @pytest.mark.corstone_fvp
-    def test_dw_conv_u85_BI(
-        self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
-    ):
+    def test_dw_conv_u85_BI(self, test_name: str, model: torch.nn.Module):
         self._test_dw_conv_ethos_BI_pipeline(
             model,
-            common.get_u85_compile_spec(quantize_io=set_quantize_io),
+            common.get_u85_compile_spec(),
             model.get_inputs(),
         )

     # All test cases except 3x3_1x3x256x256_gp3_st1 have numerical issues on FVP. MLETORCH-520
     @parameterized.expand(testsuite_conv2d_u85_xfails)
     @pytest.mark.corstone_fvp
     @conftest.expectedFailureOnFVP
-    def test_dw_conv_u85_BI_xfails(
-        self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
-    ):
+    def test_dw_conv_u85_BI_xfails(self, test_name: str, model: torch.nn.Module):
         self._test_dw_conv_ethos_BI_pipeline(
             model,
-            common.get_u85_compile_spec(quantize_io=set_quantize_io),
+            common.get_u85_compile_spec(),
             model.get_inputs(),
         )
2 changes: 0 additions & 2 deletions examples/arm/aot_arm_compiler.py
@@ -275,7 +275,6 @@ def get_compile_spec(
                 memory_mode=memory_mode,
                 extra_flags="--debug-force-regor --output-format=raw --verbose-operators --verbose-cycle-estimate",
             )
-            .set_quantize_io(True)
             .set_input_order(reorder_inputs)
         )
     elif "ethos-u85" in target:
@@ -287,7 +286,6 @@
                 memory_mode=memory_mode,
                 extra_flags="--output-format=raw --verbose-operators --verbose-cycle-estimate",
             )
-            .set_quantize_io(True)
             .set_input_order(reorder_inputs)
         )

