Adding pre-commit and one cleanup item (iree-org#82)
Quick PR to add pre-commit, apply pre-commit cleanup, and make the inputs for the
sharktank composite action optional (since only some of the fields will be used,
not all of them).

---------

Signed-off-by: geomin12 <[email protected]>
geomin12 authored Feb 27, 2025
1 parent a8791d2 commit f88f598
Showing 14 changed files with 190 additions and 103 deletions.
23 changes: 23 additions & 0 deletions .github/workflows/lint.yml
@@ -0,0 +1,23 @@
# Copyright 2025 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

name: Lint

on: [pull_request]

permissions:
contents: read

jobs:
pre-commit:
runs-on: ubuntu-24.04
steps:
- name: Checking out repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setting up python
uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
- name: Running pre-commit
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
4 changes: 4 additions & 0 deletions .gitignore
@@ -9,6 +9,7 @@
.python-version
*.venv
.venv
venv

# Python deployment artifacts
*.whl
@@ -40,6 +41,9 @@ Testing/
*.pt
*.safetensors
*.gguf
artifacts/
compilation_info.json
*.vmfbs

# job summary files
job_summary.json
15 changes: 15 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,15 @@
# Pre-commit (https://pre-commit.com) configuration for assorted lint checks.
#
# See https://pre-commit.com/hooks.html for more hooks.

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: check-merge-conflict

- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
name: Run Black to format Python files
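For reference, the hooks configured above can also be run locally before pushing. Below is a minimal sketch (not part of this change) of driving the same checks from Python; it assumes the pre-commit package is installed in the active environment.

# Sketch only: run the configured hooks against all files locally,
# mirroring what the Lint workflow above does in CI via pre-commit/action.
# Assumes `pre-commit` is installed in the active Python environment.
import subprocess

result = subprocess.run(["pre-commit", "run", "--all-files"], check=False)
print(f"pre-commit exited with code {result.returncode}")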
30 changes: 19 additions & 11 deletions linalg_ops/attention/generate_e2e_attention_tests.py
@@ -50,12 +50,14 @@ class ShapesId(enum.Enum):
MEDIUM = "medium"
LARGE = "large"


# Enumerates ways to construct MLIR tensor types.
@enum.unique
class Dynamicity(enum.Enum):
MIXED = "mixed" # Randomly mix '?' and values. Example: tensor<?x4xf32>.
STATIC = "static" # Use fixed values everywhere. Example: tensor<4x6xf32>.


# batch: Batch dimension
# m: M dimension of first and second matmul
# n: N dimension of second matmul
@@ -90,6 +92,7 @@ def get_test_shapes(shapes_id: ShapesId):

raise ValueError(shapes_id)


# A shape dimension value, i.e. a size value that could appear in a MLIR type
# such as 'tensor<?x4xf32>'. None means a dynamic size, similar to '?' in MLIR.
@dataclasses.dataclass
@@ -118,6 +121,7 @@ def int_or_question_mark(s: DimSize):
def int_or_DYN(s: DimSize):
return s.value or "DYN"


# Determines the shape of input and kernel tensors.
@dataclasses.dataclass
class TestInputTensorShapes:
@@ -133,7 +137,6 @@ class TestInputTensorShapes:
# converts from the runtime shape dimensions in TestShape and given dynamicity to
# the set of shapes to be used in a test function's input tensors.
def generate_shapes_and_scale(shape: TestShapeAndScale, dynamicity: Dynamicity):

m = shape_dim(shape.m, dynamicity)
k2 = shape_dim(shape.k2, dynamicity)
if dynamicity == Dynamicity.MIXED:
@@ -262,18 +265,23 @@ def generate_function(
)
if dynamicity == Dynamicity.MIXED:
func_definition = func_definition + (
f" %c1 = arith.constant 1 : index\n"
f" %m_in = tensor.dim %query, %c1 : {query_tensor_type}\n"
f" %k2_in = tensor.dim %key, %c1 : {key_tensor_type}\n"
f" %m = util.assume.int %m_in<udiv = 16> : index\n"
f" %k2 = util.assume.int %k2_in<udiv = 16> : index\n"
f" %query_mixed = flow.tensor.tie_shape %query : {query_tensor_type}""{%m}\n"
f" %key_mixed = flow.tensor.tie_shape %key : {key_tensor_type}""{%k2}\n"
f" %value_mixed = flow.tensor.tie_shape %value : {value_tensor_type}""{%k2}\n"
f" %result0 = tensor.empty(%m): {result_tensor_type}\n"
f" %c1 = arith.constant 1 : index\n"
f" %m_in = tensor.dim %query, %c1 : {query_tensor_type}\n"
f" %k2_in = tensor.dim %key, %c1 : {key_tensor_type}\n"
f" %m = util.assume.int %m_in<udiv = 16> : index\n"
f" %k2 = util.assume.int %k2_in<udiv = 16> : index\n"
f" %query_mixed = flow.tensor.tie_shape %query : {query_tensor_type}"
"{%m}\n"
f" %key_mixed = flow.tensor.tie_shape %key : {key_tensor_type}"
"{%k2}\n"
f" %value_mixed = flow.tensor.tie_shape %value : {value_tensor_type}"
"{%k2}\n"
f" %result0 = tensor.empty(%m): {result_tensor_type}\n"
)
else:
func_definition = func_definition + ( f" %result0 = tensor.empty(): {result_tensor_type}\n")
func_definition = func_definition + (
f" %result0 = tensor.empty(): {result_tensor_type}\n"
)

func_definition = func_definition + (
f" %scale_f16 = arith.truncf %scale : {F32} to {F16}\n"
5 changes: 1 addition & 4 deletions litert_models/utils.py
@@ -87,10 +87,7 @@ def compile_mlir_with_iree(
compile_args.extend(compile_flags)
compile_args.extend(["-o", iree_module_path])
compile_cmd = subprocess.list2cmdline(compile_args)
logger.info(
f"Launching compile command:\n" #
f" cd {cwd} && {compile_cmd}"
)
logger.info(f"Launching compile command:\n" f" cd {cwd} && {compile_cmd}") #
ret = subprocess.run(compile_cmd, shell=True, capture_output=True)
if ret.returncode != 0:
logger.error(f"Compilation of '{iree_module_path}' failed")
10 changes: 2 additions & 8 deletions onnx_models/conftest.py
@@ -251,10 +251,7 @@ def compile_mlir_with_iree(
compile_args.extend(compile_flags)
compile_args.extend(["-o", iree_module_path.relative_to(cwd)])
compile_cmd = subprocess.list2cmdline(compile_args)
logger.info(
f"Launching compile command:\n" #
f" cd {cwd} && {compile_cmd}"
)
logger.info(f"Launching compile command:\n" f" cd {cwd} && {compile_cmd}") #
ret = subprocess.run(compile_cmd, shell=True, capture_output=True, cwd=cwd)
if ret.returncode != 0:
logger.error(f"Compilation of '{iree_module_path}' failed")
@@ -271,10 +268,7 @@ def run_iree_module(iree_module_path: Path, run_flags: list[str]):
run_args = ["iree-run-module", f"--module={iree_module_path.relative_to(cwd)}"]
run_args.extend(run_flags)
run_cmd = subprocess.list2cmdline(run_args)
logger.info(
f"Launching run command:\n" #
f" cd {cwd} && {run_cmd}"
)
logger.info(f"Launching run command:\n" f" cd {cwd} && {run_cmd}") #
ret = subprocess.run(run_cmd, shell=True, capture_output=True, cwd=cwd)
if ret.returncode != 0:
logger.error(f"Run of '{iree_module_path}' failed")
5 changes: 1 addition & 4 deletions onnx_models/utils.py
@@ -169,10 +169,7 @@ def import_onnx_model_to_mlir(onnx_path: Path) -> Path:
str(imported_mlir_path),
]
import_cmd = subprocess.list2cmdline(import_args)
logger.info(
f"Running import command:\n" #
f" {import_cmd}"
)
logger.info(f"Running import command:\n" f" {import_cmd}") #
ret = subprocess.run(import_cmd, shell=True, capture_output=True)
if ret.returncode != 0:
logger.error(f"Import of '{onnx_path.name}' failed!")
6 changes: 2 additions & 4 deletions onnx_ops/conftest.py
@@ -329,8 +329,7 @@ def runtest(self):
def test_compile(self):
cwd = self.test_cwd
logging.getLogger().info(
f"Launching compile command:\n" #
f"cd {cwd} && {self.compile_cmd}"
f"Launching compile command:\n" f"cd {cwd} && {self.compile_cmd}" #
)
proc = subprocess.run(
self.compile_cmd, shell=True, capture_output=True, cwd=cwd
@@ -346,8 +345,7 @@ def test_compile(self):
def test_run(self):
cwd = self.test_cwd
logging.getLogger().info(
f"Launching run command:\n" #
f"cd {cwd} && {self.run_cmd}"
f"Launching run command:\n" f"cd {cwd} && {self.run_cmd}" #
)
proc = subprocess.run(self.run_cmd, shell=True, capture_output=True, cwd=cwd)
if proc.returncode != 0:
5 changes: 0 additions & 5 deletions sharktank_models/action.yml
@@ -9,21 +9,16 @@ name: IREE Sharktank Models Tests
inputs:
MATRIX_NAME:
description: "Identifying name of the matrix variation"
required: true
SKU:
description: "Type of SKU to test"
required: true
CHIP:
description: "Type of chip to test"
required: true
VENV_DIR:
description: "Path to the virtual environment"
TARGET:
description: "Type of target to test"
required: true
GPU:
description: "Type of GPU to test"
required: true

runs:
using: "composite"
63 changes: 44 additions & 19 deletions sharktank_models/benchmarks/conftest.py
@@ -18,53 +18,78 @@

logger = logging.getLogger(__name__)


def pytest_sessionstart(session):
with open("job_summary.md", "a") as job_summary, open("job_summary.json", "w+") as content:
with open("job_summary.md", "a") as job_summary, open(
"job_summary.json", "w+"
) as content:
print(f"{sku.upper()} Complete Benchmark Summary:\n", file=job_summary)
json.dump({}, content)

logger.info("Pytest benchmark test session is starting")



def pytest_sessionfinish(session, exitstatus):
markdown_data = {
"time_summary": ["Model name", "Submodel name", "Current time (ms)", "Expected/golden time (ms)"],
"dispatch_summary": ["Model name", "Submodel name", "Current dispatch count", "Expected/golden dispatch count"],
"size_summary": ["Model name", "Submodel name", "Current binary size (bytes)", "Expected/golden binary size (bytes)"]
"time_summary": [
"Model name",
"Submodel name",
"Current time (ms)",
"Expected/golden time (ms)",
],
"dispatch_summary": [
"Model name",
"Submodel name",
"Current dispatch count",
"Expected/golden dispatch count",
],
"size_summary": [
"Model name",
"Submodel name",
"Current binary size (bytes)",
"Expected/golden binary size (bytes)",
],
}

with open("job_summary.md", "a") as job_summary, open("job_summary.json", "r") as content:

with open("job_summary.md", "a") as job_summary, open(
"job_summary.json", "r"
) as content:
summary_data = json.loads(content.read())
for key, value in markdown_data.items():
if key in summary_data:
table_data = tabulate.tabulate(
summary_data.get(key), headers=value, tablefmt="pipe"
)
print("\n" + table_data, file=job_summary)

logger.info("Pytest benchmark test session has finished")


def pytest_collect_file(parent, file_path):
if file_path.suffix == ".json" and "job_summary" not in file_path.name and "benchmarks" in str(THIS_DIR):
if (
file_path.suffix == ".json"
and "job_summary" not in file_path.name
and "benchmarks" in str(THIS_DIR)
):
return SharkTankModelBenchmarkTests.from_parent(parent, path=file_path)

@dataclass(frozen = True)

@dataclass(frozen=True)
class BenchmarkTestSpec:
model_name: str
benchmark_file_name: str



class SharkTankModelBenchmarkTests(pytest.File):

def collect(self):
path = str(self.path).split("/")
benchmark_file_name = path[-1].replace(".json", "")
model_name = path[-2]

item_name = f"{model_name} :: {benchmark_file_name}"

spec = BenchmarkTestSpec(
model_name = model_name,
benchmark_file_name = benchmark_file_name
model_name=model_name, benchmark_file_name=benchmark_file_name
)

yield ModelBenchmarkRunItem.from_parent(self, name=item_name, spec=spec)
