diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..c4d8055
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,23 @@
+# Copyright 2025 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+name: Lint
+
+on: [pull_request]
+
+permissions:
+  contents: read
+
+jobs:
+  pre-commit:
+    runs-on: ubuntu-24.04
+    steps:
+      - name: Checking out repository
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - name: Setting up python
+        uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
+      - name: Running pre-commit
+        uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
diff --git a/.gitignore b/.gitignore
index d6bad38..85b687a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 .python-version
 *.venv
 .venv
+venv
 
 # Python deployment artifacts
 *.whl
@@ -40,6 +41,9 @@ Testing/
 *.pt
 *.safetensors
 *.gguf
+artifacts/
+compilation_info.json
+*.vmfbs
 
 # job summary files
 job_summary.json
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..9517673
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,15 @@
+# Pre-commit (https://pre-commit.com) configuration for assorted lint checks.
+#
+# See https://pre-commit.com/hooks.html for more hooks.
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.2.0
+    hooks:
+      - id: check-merge-conflict
+
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
+        name: Run Black to format Python files
diff --git a/linalg_ops/attention/generate_e2e_attention_tests.py b/linalg_ops/attention/generate_e2e_attention_tests.py
index 4a27483..463c90c 100644
--- a/linalg_ops/attention/generate_e2e_attention_tests.py
+++ b/linalg_ops/attention/generate_e2e_attention_tests.py
@@ -50,12 +50,14 @@ class ShapesId(enum.Enum):
     MEDIUM = "medium"
     LARGE = "large"
 
+
 # Enumerates ways to construct MLIR tensor types.
 @enum.unique
 class Dynamicity(enum.Enum):
     MIXED = "mixed"  # Randomly mix '?' and values. Example: tensor<?x4xf32>.
     STATIC = "static"  # Use fixed values everywhere. Example: tensor<4x6xf32>.
 
+
 # batch: Batch dimension
 # m: M dimension of first and second matmul
 # n: N dimension of second matmul
@@ -90,6 +92,7 @@ def get_test_shapes(shapes_id: ShapesId):
 
     raise ValueError(shapes_id)
 
+
 # A shape dimension value, i.e. a size value that could appear in a MLIR type
 # such as 'tensor<?x4xf32>'. None means a dynamic size, similar to '?' in MLIR.
 @dataclasses.dataclass
@@ -118,6 +121,7 @@ def int_or_question_mark(s: DimSize):
 def int_or_DYN(s: DimSize):
     return s.value or "DYN"
 
+
 # Determines the shape of input and kernel tensors.
 @dataclasses.dataclass
 class TestInputTensorShapes:
@@ -133,7 +137,6 @@ class TestInputTensorShapes:
 # converts from the runtime shape dimensions in TestShape and given dynamicity to
 # the set of shapes to be used in a test function's input tensors.
 def generate_shapes_and_scale(shape: TestShapeAndScale, dynamicity: Dynamicity):
-
     m = shape_dim(shape.m, dynamicity)
     k2 = shape_dim(shape.k2, dynamicity)
     if dynamicity == Dynamicity.MIXED:
@@ -262,18 +265,23 @@ def generate_function(
     )
     if dynamicity == Dynamicity.MIXED:
         func_definition = func_definition + (
-            f"  %c1 = arith.constant 1 : index\n"
-            f"  %m_in = tensor.dim %query, %c1 : {query_tensor_type}\n"
-            f"  %k2_in = tensor.dim %key, %c1 : {key_tensor_type}\n"
-            f"  %m = util.assume.int %m_in : index\n"
-            f"  %k2 = util.assume.int %k2_in : index\n"
-            f"  %query_mixed = flow.tensor.tie_shape %query : {query_tensor_type}""{%m}\n"
-            f"  %key_mixed = flow.tensor.tie_shape %key : {key_tensor_type}""{%k2}\n"
-            f"  %value_mixed = flow.tensor.tie_shape %value : {value_tensor_type}""{%k2}\n"
-            f"  %result0 = tensor.empty(%m): {result_tensor_type}\n"
+            f"  %c1 = arith.constant 1 : index\n"
+            f"  %m_in = tensor.dim %query, %c1 : {query_tensor_type}\n"
+            f"  %k2_in = tensor.dim %key, %c1 : {key_tensor_type}\n"
+            f"  %m = util.assume.int %m_in : index\n"
+            f"  %k2 = util.assume.int %k2_in : index\n"
+            f"  %query_mixed = flow.tensor.tie_shape %query : {query_tensor_type}"
+            "{%m}\n"
+            f"  %key_mixed = flow.tensor.tie_shape %key : {key_tensor_type}"
+            "{%k2}\n"
+            f"  %value_mixed = flow.tensor.tie_shape %value : {value_tensor_type}"
+            "{%k2}\n"
+            f"  %result0 = tensor.empty(%m): {result_tensor_type}\n"
         )
     else:
-        func_definition = func_definition + ( f"  %result0 = tensor.empty(): {result_tensor_type}\n")
+        func_definition = func_definition + (
+            f"  %result0 = tensor.empty(): {result_tensor_type}\n"
+        )
 
     func_definition = func_definition + (
         f"  %scale_f16 = arith.truncf %scale : {F32} to {F16}\n"
diff --git a/litert_models/utils.py b/litert_models/utils.py
index 59c26a0..424cd92 100644
--- a/litert_models/utils.py
+++ b/litert_models/utils.py
@@ -87,10 +87,7 @@ def compile_mlir_with_iree(
     compile_args.extend(compile_flags)
     compile_args.extend(["-o", iree_module_path])
     compile_cmd = subprocess.list2cmdline(compile_args)
-    logger.info(
-        f"Launching compile command:\n"  #
-        f"  cd {cwd} && {compile_cmd}"
-    )
+    logger.info(f"Launching compile command:\n" f"  cd {cwd} && {compile_cmd}")  #
     ret = subprocess.run(compile_cmd, shell=True, capture_output=True)
     if ret.returncode != 0:
         logger.error(f"Compilation of '{iree_module_path}' failed")
diff --git a/onnx_models/conftest.py b/onnx_models/conftest.py
index 6efeed7..63b8195 100644
--- a/onnx_models/conftest.py
+++ b/onnx_models/conftest.py
@@ -251,10 +251,7 @@ def compile_mlir_with_iree(
     compile_args.extend(compile_flags)
     compile_args.extend(["-o", iree_module_path.relative_to(cwd)])
     compile_cmd = subprocess.list2cmdline(compile_args)
-    logger.info(
-        f"Launching compile command:\n"  #
-        f"  cd {cwd} && {compile_cmd}"
-    )
+    logger.info(f"Launching compile command:\n" f"  cd {cwd} && {compile_cmd}")  #
     ret = subprocess.run(compile_cmd, shell=True, capture_output=True, cwd=cwd)
     if ret.returncode != 0:
         logger.error(f"Compilation of '{iree_module_path}' failed")
@@ -271,10 +268,7 @@ def run_iree_module(iree_module_path: Path, run_flags: list[str]):
     run_args = ["iree-run-module", f"--module={iree_module_path.relative_to(cwd)}"]
     run_args.extend(run_flags)
     run_cmd = subprocess.list2cmdline(run_args)
-    logger.info(
-        f"Launching run command:\n"  #
-        f"  cd {cwd} && {run_cmd}"
-    )
+    logger.info(f"Launching run command:\n" f"  cd {cwd} && {run_cmd}")  #
    ret = subprocess.run(run_cmd, shell=True, capture_output=True, cwd=cwd)
     if ret.returncode != 0:
failed") diff --git a/onnx_models/utils.py b/onnx_models/utils.py index 6f8fca9..3746fbe 100644 --- a/onnx_models/utils.py +++ b/onnx_models/utils.py @@ -169,10 +169,7 @@ def import_onnx_model_to_mlir(onnx_path: Path) -> Path: str(imported_mlir_path), ] import_cmd = subprocess.list2cmdline(import_args) - logger.info( - f"Running import command:\n" # - f" {import_cmd}" - ) + logger.info(f"Running import command:\n" f" {import_cmd}") # ret = subprocess.run(import_cmd, shell=True, capture_output=True) if ret.returncode != 0: logger.error(f"Import of '{onnx_path.name}' failed!") diff --git a/onnx_ops/conftest.py b/onnx_ops/conftest.py index ab1b65e..0c83684 100644 --- a/onnx_ops/conftest.py +++ b/onnx_ops/conftest.py @@ -329,8 +329,7 @@ def runtest(self): def test_compile(self): cwd = self.test_cwd logging.getLogger().info( - f"Launching compile command:\n" # - f"cd {cwd} && {self.compile_cmd}" + f"Launching compile command:\n" f"cd {cwd} && {self.compile_cmd}" # ) proc = subprocess.run( self.compile_cmd, shell=True, capture_output=True, cwd=cwd @@ -346,8 +345,7 @@ def test_compile(self): def test_run(self): cwd = self.test_cwd logging.getLogger().info( - f"Launching run command:\n" # - f"cd {cwd} && {self.run_cmd}" + f"Launching run command:\n" f"cd {cwd} && {self.run_cmd}" # ) proc = subprocess.run(self.run_cmd, shell=True, capture_output=True, cwd=cwd) if proc.returncode != 0: diff --git a/sharktank_models/action.yml b/sharktank_models/action.yml index 4bc1b88..34e539e 100644 --- a/sharktank_models/action.yml +++ b/sharktank_models/action.yml @@ -9,21 +9,16 @@ name: IREE Sharktank Models Tests inputs: MATRIX_NAME: description: "Identifying name of the matrix variation" - required: true SKU: description: "Type of SKU to test" - required: true CHIP: description: "Type of chip to test" - required: true VENV_DIR: description: "Path to the virtual environment" TARGET: description: "Type of target to test" - required: true GPU: description: "Type of GPU to test" - required: true runs: using: "composite" diff --git a/sharktank_models/benchmarks/conftest.py b/sharktank_models/benchmarks/conftest.py index dcbcbf0..c2f4f14 100644 --- a/sharktank_models/benchmarks/conftest.py +++ b/sharktank_models/benchmarks/conftest.py @@ -18,21 +18,42 @@ logger = logging.getLogger(__name__) + def pytest_sessionstart(session): - with open("job_summary.md", "a") as job_summary, open("job_summary.json", "w+") as content: + with open("job_summary.md", "a") as job_summary, open( + "job_summary.json", "w+" + ) as content: print(f"{sku.upper()} Complete Benchmark Summary:\n", file=job_summary) json.dump({}, content) - + logger.info("Pytest benchmark test session is starting") - + + def pytest_sessionfinish(session, exitstatus): markdown_data = { - "time_summary": ["Model name", "Submodel name", "Current time (ms)", "Expected/golden time (ms)"], - "dispatch_summary": ["Model name", "Submodel name", "Current dispatch count", "Expected/golden dispatch count"], - "size_summary": ["Model name", "Submodel name", "Current binary size (bytes)", "Expected/golden binary size (bytes)"] + "time_summary": [ + "Model name", + "Submodel name", + "Current time (ms)", + "Expected/golden time (ms)", + ], + "dispatch_summary": [ + "Model name", + "Submodel name", + "Current dispatch count", + "Expected/golden dispatch count", + ], + "size_summary": [ + "Model name", + "Submodel name", + "Current binary size (bytes)", + "Expected/golden binary size (bytes)", + ], } - - with open("job_summary.md", "a") as job_summary, open("job_summary.json", 
"r") as content: + + with open("job_summary.md", "a") as job_summary, open( + "job_summary.json", "r" + ) as content: summary_data = json.loads(content.read()) for key, value in markdown_data.items(): if key in summary_data: @@ -40,31 +61,35 @@ def pytest_sessionfinish(session, exitstatus): summary_data.get(key), headers=value, tablefmt="pipe" ) print("\n" + table_data, file=job_summary) - + logger.info("Pytest benchmark test session has finished") + def pytest_collect_file(parent, file_path): - if file_path.suffix == ".json" and "job_summary" not in file_path.name and "benchmarks" in str(THIS_DIR): + if ( + file_path.suffix == ".json" + and "job_summary" not in file_path.name + and "benchmarks" in str(THIS_DIR) + ): return SharkTankModelBenchmarkTests.from_parent(parent, path=file_path) -@dataclass(frozen = True) + +@dataclass(frozen=True) class BenchmarkTestSpec: model_name: str benchmark_file_name: str - + + class SharkTankModelBenchmarkTests(pytest.File): - def collect(self): path = str(self.path).split("/") benchmark_file_name = path[-1].replace(".json", "") model_name = path[-2] - + item_name = f"{model_name} :: {benchmark_file_name}" - + spec = BenchmarkTestSpec( - model_name = model_name, - benchmark_file_name = benchmark_file_name + model_name=model_name, benchmark_file_name=benchmark_file_name ) - + yield ModelBenchmarkRunItem.from_parent(self, name=item_name, spec=spec) - \ No newline at end of file diff --git a/sharktank_models/benchmarks/model_benchmark_run.py b/sharktank_models/benchmarks/model_benchmark_run.py index 8ffc10d..b5ab277 100644 --- a/sharktank_models/benchmarks/model_benchmark_run.py +++ b/sharktank_models/benchmarks/model_benchmark_run.py @@ -30,6 +30,8 @@ """ Helper methods """ + + # Converts a list of inputs into compiler friendly input arguments def get_input_list(input_list): return [f"--input={entry}" for entry in input_list] @@ -85,13 +87,14 @@ def e2e_iree_benchmark_module_args(modules, file_suffix): class ModelBenchmarkRunItem(pytest.Item): - def __init__(self, spec, **kwargs): super().__init__(**kwargs) self.spec = spec self.model_name = self.spec.model_name self.benchmark_file_name = self.spec.benchmark_file_name - SUBMODEL_FILE_PATH = THIS_DIR / f"{self.model_name}/{self.benchmark_file_name}.json" + SUBMODEL_FILE_PATH = ( + THIS_DIR / f"{self.model_name}/{self.benchmark_file_name}.json" + ) split_file_name = self.benchmark_file_name.split("_") self.submodel_name = "_".join(split_file_name[:-1]) type_of_backend = split_file_name[-1] @@ -164,9 +167,13 @@ def runtest(self): # If there are modules for an e2e pipeline test, reset exec_args and directory_compile variables to custom variables if self.modules: - all_modules_found, exec_args = e2e_iree_benchmark_module_args(self.modules, self.file_suffix) + all_modules_found, exec_args = e2e_iree_benchmark_module_args( + self.modules, self.file_suffix + ) if not all_modules_found: - pytest.skip(f"Modules needed for {self.model_name} :: {self.submodel_name} not found, unable to run benchmark tests. Skipping...") + pytest.skip( + f"Modules needed for {self.model_name} :: {self.submodel_name} not found, unable to run benchmark tests. Skipping..." + ) vmfb_file_path = f"{vmfb_dir}/{self.compiled_file_name}.vmfb" exec_args += ( @@ -176,9 +183,11 @@ def runtest(self): + get_input_list(self.inputs) + self.benchmark_flags ) - + if not Path(vmfb_file_path).is_file(): - pytest.skip(f"Vmfb file for {self.model_name} :: {self.submodel_name} was not found. 
-            pytest.skip(f"Vmfb file for {self.model_name} :: {self.submodel_name} was not found. Unable to run benchmark tests, skipping...")
+            pytest.skip(
+                f"Vmfb file for {self.model_name} :: {self.submodel_name} was not found. Unable to run benchmark tests, skipping..."
+            )
 
         # run iree benchmark command
         ret_value, output = iree_benchmark_module(
@@ -198,10 +207,17 @@ def runtest(self):
         # golden time check
         if self.golden_time:
             # Writing to time summary
-            mean_time_row = [self.model_name, self.submodel_name, str(benchmark_mean_time), self.golden_time]
+            mean_time_row = [
+                self.model_name,
+                self.submodel_name,
+                str(benchmark_mean_time),
+                self.golden_time,
+            ]
             with open("job_summary.json", "r+") as job_summary:
                 file_data = json.loads(job_summary.read())
-                file_data["time_summary"] = file_data.get("time_summary", []) + [mean_time_row]
+                file_data["time_summary"] = file_data.get("time_summary", []) + [
+                    mean_time_row
+                ]
                 job_summary.seek(0)
                 json.dump(file_data, job_summary)
 
@@ -226,10 +242,17 @@ def runtest(self):
                 comp_stats["stream-aggregate"]["execution"]["dispatch-count"]
             )
 
-            dispatch_count_row = [self.model_name, self.submodel_name, dispatch_count, self.golden_dispatch]
+            dispatch_count_row = [
+                self.model_name,
+                self.submodel_name,
+                dispatch_count,
+                self.golden_dispatch,
+            ]
             with open("job_summary.json", "r+") as job_summary:
                 file_data = json.loads(job_summary.read())
-                file_data["dispatch_summary"] = file_data.get("dispatch_summary", []) + [mean_time_row]
+                file_data["dispatch_summary"] = file_data.get(
+                    "dispatch_summary", []
+                ) + [dispatch_count_row]
                 job_summary.seek(0)
                 json.dump(file_data, job_summary)
 
@@ -250,13 +273,20 @@ def runtest(self):
             module_path = f"{directory_compile}/model.{self.file_suffix}.vmfb"
             binary_size = Path(module_path).stat().st_size
 
-            binary_size_row = [self.model_name, self.submodel_name, binary_size, self.golden_size]
+            binary_size_row = [
+                self.model_name,
+                self.submodel_name,
+                binary_size,
+                self.golden_size,
+            ]
             with open("job_summary.json", "r+") as job_summary:
                 file_data = json.loads(job_summary.read())
-                file_data["size_summary"] = file_data.get("size_summary", []) + [mean_time_row]
+                file_data["size_summary"] = file_data.get("size_summary", []) + [
+                    binary_size_row
+                ]
                 job_summary.seek(0)
                 json.dump(file_data, job_summary)
-    
+
             logger.info(
                 (
                     f"{self.model_name} {self.submodel_name} binary size: {binary_size} bytes"
                 )
             )
@@ -269,9 +299,9 @@ def runtest(self):
                 self.golden_size,
                 f"{self.model_name} {self.submodel_name} binary size should not get bigger",
             )
-    
+
     def repr_failure(self, excinfo):
         return super().repr_failure(excinfo)
-    
+
     def reportinfo(self):
         return self.path, 0, f"usecase: {self.name}"
diff --git a/sharktank_models/quality_tests/README.md b/sharktank_models/quality_tests/README.md
index 6e0cd82..c3942f5 100644
--- a/sharktank_models/quality_tests/README.md
+++ b/sharktank_models/quality_tests/README.md
@@ -9,7 +9,7 @@
 - Example command to run quality tests for a specific model
 
 ```
-python sharktank_models/quality_tests/ \
+pytest sharktank_models/quality_tests/ \
   -rpFe \
   --log-cli-level=info \
   --timeout=600 \
diff --git a/sharktank_models/quality_tests/conftest.py b/sharktank_models/quality_tests/conftest.py
index ecd6004..2216497 100644
--- a/sharktank_models/quality_tests/conftest.py
+++ b/sharktank_models/quality_tests/conftest.py
@@ -14,34 +14,36 @@
 THIS_DIR = Path(__file__).parent
 logger = logging.getLogger(__name__)
 
+
 def pytest_configure():
     pytest.vmfb_manager = {}
 
+
 def pytest_sessionstart(session):
     logger.info("Pytest quality test session is starting")
 
+
 def pytest_collect_file(parent, file_path):
".json" and "quality_tests" in str(THIS_DIR): return SharkTankModelQualityTests.from_parent(parent, path=file_path) -@dataclass(frozen = True) + +@dataclass(frozen=True) class QualityTestSpec: model_name: str quality_file_name: str - + + class SharkTankModelQualityTests(pytest.File): - def collect(self): path = str(self.path).split("/") quality_file_name = path[-1].replace(".json", "") model_name = path[-2] - + item_name = f"{model_name} :: {quality_file_name}" - + spec = QualityTestSpec( - model_name = model_name, - quality_file_name = quality_file_name + model_name=model_name, quality_file_name=quality_file_name ) - + yield ModelQualityRunItem.from_parent(self, name=item_name, spec=spec) - \ No newline at end of file diff --git a/sharktank_models/quality_tests/model_quality_run.py b/sharktank_models/quality_tests/model_quality_run.py index 84ec017..9e20387 100644 --- a/sharktank_models/quality_tests/model_quality_run.py +++ b/sharktank_models/quality_tests/model_quality_run.py @@ -18,6 +18,7 @@ chip = os.getenv("ROCM_CHIP", default="gfx942") sku = os.getenv("SKU", default="mi300") + # Helper methods def fetch_source_fixtures_for_run_flags(inference_list, model_name, submodel_name): result = [] @@ -53,13 +54,14 @@ def common_run_flags_generation(input_list, output_list): class ModelQualityRunItem(pytest.Item): - def __init__(self, spec, **kwargs): super().__init__(**kwargs) self.spec = spec self.model_name = self.spec.model_name self.quality_file_name = self.spec.quality_file_name - SUBMODEL_FILE_PATH = THIS_DIR / f"{self.model_name}/{self.quality_file_name}.json" + SUBMODEL_FILE_PATH = ( + THIS_DIR / f"{self.model_name}/{self.quality_file_name}.json" + ) split_file_name = self.quality_file_name.split("_") self.submodel_name = "_".join(split_file_name[:-1]) self.type_of_backend = split_file_name[-1] @@ -97,8 +99,8 @@ def __init__(self, spec, **kwargs): if data.get("mlir") else None ) - - self.compiler_flags = data.get("compiler_flags", []) + + self.compiler_flags = data.get("compiler_flags", []) self.device = data.get("device") # Setting input, output, and function call arguments @@ -126,9 +128,7 @@ def __init__(self, spec, **kwargs): ) # Custom configuration to fp16 and adding secondary pipeline mlir - self.pipeline_compiler_flags = data.get( - "pipeline_compiler_flags", [] - ) + self.pipeline_compiler_flags = data.get("pipeline_compiler_flags", []) self.pipeline_mlir = ( fetch_source_fixture( data.get("pipeline_mlir"), @@ -138,22 +138,19 @@ def __init__(self, spec, **kwargs): else None ) self.add_pipeline_module = data.get("add_pipeline_module", False) - + if self.type_of_backend == "rocm": self.file_suffix = f"{self.type_of_backend}_{chip}" - self.compiler_flags += [ - f"--iree-hip-target={chip}" - ] + self.compiler_flags += [f"--iree-hip-target={chip}"] self.pipeline_compiler_flags.append(f"--iree-hip-target={chip}") elif self.type_of_backend == "cpu": self.file_suffix = "cpu" - + def runtest(self): self.test_compile() self.test_run_threshold() - def test_compile(self): if chip in self.compile_chip_expecting_to_fail: pytest.xfail( @@ -161,7 +158,9 @@ def test_compile(self): ) vmfbs_path = f"{self.model_name}_{self.submodel_name}_vmfbs" - vmfb_manager_unique_key = f"{self.model_name}_{self.submodel_name}_{self.type_of_backend}_vmfb" + vmfb_manager_unique_key = ( + f"{self.model_name}_{self.submodel_name}_{self.type_of_backend}_vmfb" + ) pytest.vmfb_manager[vmfb_manager_unique_key] = iree_compile( self.mlir, self.compiler_flags, @@ -171,15 +170,15 @@ def test_compile(self): ) if 
         if self.pipeline_mlir:
-            pipeline_vmfb_manager_unique_key = (
-                f"{self.model_name}_{self.submodel_name}_pipeline_{self.type_of_backend}_vmfb"
-            )
+            pipeline_vmfb_manager_unique_key = f"{self.model_name}_{self.submodel_name}_pipeline_{self.type_of_backend}_vmfb"
             pytest.vmfb_manager[pipeline_vmfb_manager_unique_key] = iree_compile(
                 self.pipeline_mlir,
                 self.pipeline_compiler_flags,
                 Path(vmfb_dir)
                 / Path(vmfbs_path)
-                / Path("pipeline_model").with_suffix(f".{self.type_of_backend}_{chip}.vmfb"),
+                / Path("pipeline_model").with_suffix(
+                    f".{self.type_of_backend}_{chip}.vmfb"
+                ),
             )
 
     def test_run_threshold(self):
@@ -196,15 +195,15 @@ def test_run_threshold(self):
             args.append(f"--parameters=model={self.real_weights.path}")
 
         if self.add_pipeline_module:
-            pipeline_vmfb_manager_unique_key = (
-                f"{self.model_name}_{self.submodel_name}_pipeline_{self.type_of_backend}_vmfb"
-            )
+            pipeline_vmfb_manager_unique_key = f"{self.model_name}_{self.submodel_name}_pipeline_{self.type_of_backend}_vmfb"
             pipeline_module_name = pytest.vmfb_manager.get(
                 pipeline_vmfb_manager_unique_key
            )
             args.append(f"--module={pipeline_module_name}")
 
-        vmfb_manager_unique_key = f"{self.model_name}_{self.submodel_name}_{self.type_of_backend}_vmfb"
+        vmfb_manager_unique_key = (
+            f"{self.model_name}_{self.submodel_name}_{self.type_of_backend}_vmfb"
+        )
         return iree_run_module(
             Path(pytest.vmfb_manager.get(vmfb_manager_unique_key)),
             device=self.device,
@@ -214,6 +213,6 @@ def test_run_threshold(self):
 
     def repr_failure(self, excinfo):
         return super().repr_failure(excinfo)
-    
+
     def reportinfo(self):
         return self.path, 0, f"usecase: {self.name}"
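
For reference, the conftest.py files touched above all rely on the same pytest collection pattern: pytest_collect_file turns each JSON file into a pytest.File, whose collect() yields pytest.Item objects that do the actual compile/benchmark/quality work in runtest(). The following is a minimal self-contained sketch of that pattern, not part of the patch; the ExampleTestSpec, ExampleJsonFile, and ExampleRunItem names and the JSON-content assertion are illustrative only.

# conftest.py sketch of the JSON-driven collection pattern (illustrative names).
import json
from dataclasses import dataclass

import pytest


@dataclass(frozen=True)
class ExampleTestSpec:
    model_name: str
    case_file_name: str


def pytest_collect_file(parent, file_path):
    # Collect every JSON file next to this conftest (except job summaries).
    if file_path.suffix == ".json" and "job_summary" not in file_path.name:
        return ExampleJsonFile.from_parent(parent, path=file_path)


class ExampleJsonFile(pytest.File):
    def collect(self):
        # Derive names from the path, mirroring the model/<case>.json layout.
        case_file_name = self.path.stem
        model_name = self.path.parent.name
        spec = ExampleTestSpec(model_name=model_name, case_file_name=case_file_name)
        yield ExampleRunItem.from_parent(
            self, name=f"{model_name} :: {case_file_name}", spec=spec
        )


class ExampleRunItem(pytest.Item):
    def __init__(self, spec, **kwargs):
        super().__init__(**kwargs)
        self.spec = spec

    def runtest(self):
        # The real items compile and benchmark modules here; this sketch only
        # checks that the JSON parses and names a model.
        data = json.loads(self.path.read_text())
        assert "model" in data, f"{self.spec.case_file_name} is missing 'model'"

    def reportinfo(self):
        return self.path, 0, f"usecase: {self.name}"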