Skip to content

Commit

Permalink
Merge branch 'main' into refactor/runner_limit
Browse files Browse the repository at this point in the history
  • Loading branch information
mtache authored Oct 10, 2024
2 parents d86f85c + 42476d9 commit 6d59aa9
Show file tree
Hide file tree
Showing 14 changed files with 145 additions and 57 deletions.
9 changes: 6 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
---
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
ci:
autoupdate_commit_msg: "ci: pre-commit autoupdate"

files: ^(anta|docs|scripts|tests|asynceapi)/

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v5.0.0
hooks:
- id: trailing-whitespace
exclude: docs/.*.svg
Expand Down Expand Up @@ -43,7 +46,7 @@ repos:
- '<!--| ~| -->'

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.8
rev: v0.6.9
hooks:
- id: ruff
name: Run Ruff linter
Expand Down Expand Up @@ -97,7 +100,7 @@ repos:
files: ^(anta|tests)/

- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.41.0
rev: v0.42.0
hooks:
- id: markdownlint
name: Check Markdown files style.
Expand Down
9 changes: 4 additions & 5 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -1,15 +1,14 @@
{
"ruff.enable": true,
"ruff.configuration": "pyproject.toml",
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests"
],
"python.languageServer": "Pylance",
"githubIssues.issueBranchTitle": "issues/${issueNumber}-${issueTitle}",
"editor.formatOnPaste": true,
"files.trimTrailingWhitespace": true,
"workbench.remoteIndicator.showExtensionRecommendations": true,
"pylint.importStrategy": "fromEnvironment",
"pylint.args": [
"--rcfile=pyproject.toml"
],

}
3 changes: 2 additions & 1 deletion anta/reporter/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from rich.table import Table

from anta import RICH_COLOR_PALETTE, RICH_COLOR_THEME
from anta.tools import convert_categories

if TYPE_CHECKING:
import pathlib
Expand Down Expand Up @@ -125,7 +126,7 @@ def report_all(self, manager: ResultManager, title: str = "All tests results") -
def add_line(result: TestResult) -> None:
state = self._color_result(result.result)
message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
categories = ", ".join(result.categories)
categories = ", ".join(convert_categories(result.categories))
table.add_row(str(result.name), result.test, state, message, result.description, categories)

for result in manager.results:
Expand Down
3 changes: 2 additions & 1 deletion anta/reporter/csv_reporter.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from typing import TYPE_CHECKING

from anta.logger import anta_log_exception
from anta.tools import convert_categories

if TYPE_CHECKING:
import pathlib
Expand Down Expand Up @@ -71,7 +72,7 @@ def convert_to_list(cls, result: TestResult) -> list[str]:
TestResult converted into a list.
"""
message = cls.split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
categories = cls.split_list_to_txt_list(result.categories) if len(result.categories) > 0 else "None"
categories = cls.split_list_to_txt_list(convert_categories(result.categories)) if len(result.categories) > 0 else "None"
return [
str(result.name),
result.test,
Expand Down
7 changes: 4 additions & 3 deletions anta/reporter/md_reporter.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from anta.constants import MD_REPORT_TOC
from anta.logger import anta_log_exception
from anta.result_manager.models import AntaTestStatus
from anta.tools import convert_categories

if TYPE_CHECKING:
from collections.abc import Generator
Expand Down Expand Up @@ -238,8 +239,8 @@ def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the summary totals device under test table."""
for device, stat in self.results.device_stats.items():
total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count
categories_skipped = ", ".join(sorted(stat.categories_skipped))
categories_failed = ", ".join(sorted(stat.categories_failed))
categories_skipped = ", ".join(sorted(convert_categories(list(stat.categories_skipped))))
categories_failed = ", ".join(sorted(convert_categories(list(stat.categories_failed))))
yield (
f"| {device} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} | {stat.tests_error_count} "
f"| {categories_skipped or '-'} | {categories_failed or '-'} |\n"
Expand Down Expand Up @@ -286,7 +287,7 @@ def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the all test results table."""
for result in self.results.get_results(sort_by=["name", "test"]):
messages = self.safe_markdown(", ".join(result.messages))
categories = ", ".join(result.categories)
categories = ", ".join(convert_categories(result.categories))
yield (
f"| {result.name or '-'} | {categories or '-'} | {result.test or '-'} "
f"| {result.description or '-'} | {self.safe_markdown(result.custom_field) or '-'} | {result.result or '-'} | {messages or '-'} |\n"
Expand Down
4 changes: 0 additions & 4 deletions anta/result_manager/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from functools import cached_property
from itertools import chain

from anta.constants import ACRONYM_CATEGORIES
from anta.result_manager.models import AntaTestStatus, TestResult

from .models import CategoryStats, DeviceStats, TestStats
Expand Down Expand Up @@ -162,9 +161,6 @@ def _update_stats(self, result: TestResult) -> None:
result
TestResult to update the statistics.
"""
result.categories = [
" ".join(word.upper() if word.lower() in ACRONYM_CATEGORIES else word.title() for word in category.split()) for category in result.categories
]
count_attr = f"tests_{result.result}_count"

# Update device stats
Expand Down
23 changes: 23 additions & 0 deletions anta/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from time import perf_counter
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast

from anta.constants import ACRONYM_CATEGORIES
from anta.custom_types import REGEXP_PATH_MARKERS
from anta.logger import format_td

Expand Down Expand Up @@ -372,3 +373,25 @@ def safe_command(command: str) -> str:
The sanitized command.
"""
return re.sub(rf"{REGEXP_PATH_MARKERS}", "_", command)


def convert_categories(categories: list[str]) -> list[str]:
    """Convert categories for reports.

    If a word of a category is a defined acronym, convert it to upper case;
    otherwise capitalize it (title case).

    Parameters
    ----------
    categories
        A list of categories.

    Returns
    -------
    list[str]
        The list of converted categories.

    Raises
    ------
    TypeError
        If `categories` is not a list.
    """
    # Guard clause: fail fast on wrong input type.
    if not isinstance(categories, list):
        msg = f"Wrong input type '{type(categories)}' for convert_categories."
        raise TypeError(msg)
    return [" ".join(word.upper() if word.lower() in ACRONYM_CATEGORIES else word.title() for word in category.split()) for category in categories]
17 changes: 17 additions & 0 deletions tests/benchmark/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,20 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Display the total number of ANTA unit test cases used to benchmark."""
    summary = f"{TEST_CASE_COUNT} ANTA test cases"
    terminalreporter.write_sep("=", summary)


def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Parametrize inventory for benchmark tests."""
    if "inventory" not in metafunc.fixturenames:
        return
    # Do not override test function parametrize marker for inventory arg
    existing_markers = metafunc.definition.iter_markers(name="parametrize")
    if any("inventory" in marker.args[0] for marker in existing_markers):
        return
    metafunc.parametrize(
        "inventory",
        [
            pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
            pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
        ],
        indirect=True,
    )
38 changes: 10 additions & 28 deletions tests/benchmark/test_anta.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,27 +22,18 @@
logger = logging.getLogger(__name__)


@pytest.mark.parametrize(
"inventory",
[
pytest.param({"count": 1, "disable_cache": True, "reachable": False}, id="1 device"),
pytest.param({"count": 2, "disable_cache": True, "reachable": False}, id="2 devices"),
],
indirect=True,
)
def test_anta_dry_run(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Test and benchmark ANTA in Dry-Run Mode."""
def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark ANTA in Dry-Run Mode."""
# Disable logging during ANTA execution to avoid having these function time in benchmarks
logging.disable()

def bench() -> ResultManager:
"""Need to wrap the ANTA Runner to instantiate a new ResultManger for each benchmark run."""
def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
asyncio.run(main(manager, inventory, catalog, dry_run=True))
event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
return manager

manager = benchmark(bench)
manager = benchmark(_)

logging.disable(logging.NOTSET)
if len(manager.results) != len(inventory) * len(catalog.tests):
Expand All @@ -51,30 +42,21 @@ def bench() -> ResultManager:
logger.info(bench_info)


@pytest.mark.parametrize(
"inventory",
[
pytest.param({"count": 1, "disable_cache": True}, id="1 device"),
pytest.param({"count": 2, "disable_cache": True}, id="2 devices"),
],
indirect=True,
)
@patch("anta.models.AntaTest.collect", collect)
@patch("anta.device.AntaDevice.collect_commands", collect_commands)
@respx.mock # Mock eAPI responses
def test_anta(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Test and benchmark ANTA. Mock eAPI responses."""
def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark ANTA."""
# Disable logging during ANTA execution to avoid having these function time in benchmarks
logging.disable()

def bench() -> ResultManager:
"""Need to wrap the ANTA Runner to instantiate a new ResultManger for each benchmark run."""
def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
asyncio.run(main(manager, inventory, catalog))
event_loop.run_until_complete(main(manager, inventory, catalog))
return manager

manager = benchmark(bench)
manager = benchmark(_)

logging.disable(logging.NOTSET)

Expand Down
48 changes: 48 additions & 0 deletions tests/benchmark/test_runner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for anta.runner."""

from __future__ import annotations

from typing import TYPE_CHECKING

from anta.result_manager import ResultManager
from anta.runner import get_coroutines, prepare_tests

if TYPE_CHECKING:
from collections import defaultdict

from pytest_codspeed import BenchmarkFixture

from anta.catalog import AntaCatalog, AntaTestDefinition
from anta.device import AntaDevice
from anta.inventory import AntaInventory


def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.prepare_tests`."""

    def run_prepare() -> defaultdict[AntaDevice, set[AntaTestDefinition]] | None:
        # Indexes are built lazily; clear them so each benchmark round does the full work.
        catalog.clear_indexes()
        return prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)

    selected_tests = benchmark(run_prepare)

    assert selected_tests is not None
    assert len(selected_tests) == len(inventory)
    total_selected = sum(len(test_definitions) for test_definitions in selected_tests.values())
    assert total_selected == len(inventory) * len(catalog.tests)


def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.get_coroutines`."""
    selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)

    assert selected_tests is not None

    def build_coroutines():
        return get_coroutines(selected_tests=selected_tests, manager=ResultManager())

    coroutines = benchmark(build_coroutines)

    # Close the coroutines created by the benchmark so they are never-awaited without warnings.
    for coroutine in coroutines:
        coroutine.close()

    expected_count = sum(len(test_definitions) for test_definitions in selected_tests.values())
    assert expected_count == len(coroutines)
14 changes: 8 additions & 6 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,20 @@
DATA_DIR: Path = Path(__file__).parent.resolve() / "data"


@pytest.fixture(params=[{"count": 1}], ids=["1-reachable-device-without-cache"])
@pytest.fixture
def inventory(request: pytest.FixtureRequest) -> Iterator[AntaInventory]:
"""Generate an ANTA inventory."""
user = "admin"
password = "password" # noqa: S105
disable_cache = request.param.get("disable_cache", True)
reachable = request.param.get("reachable", True)
if "filename" in request.param:
inv = AntaInventory.parse(DATA_DIR / request.param["filename"], username=user, password=password, disable_cache=disable_cache)
params = request.param if hasattr(request, "param") else {}
count = params.get("count", 1)
disable_cache = params.get("disable_cache", True)
reachable = params.get("reachable", True)
if "filename" in params:
inv = AntaInventory.parse(DATA_DIR / params["filename"], username=user, password=password, disable_cache=disable_cache)
else:
inv = AntaInventory()
for i in range(request.param["count"]):
for i in range(count):
inv.add_device(
AsyncEOSDevice(
host=f"device-{i}.anta.arista.com",
Expand Down
3 changes: 2 additions & 1 deletion tests/units/reporter/test_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from anta.reporter.csv_reporter import ReportCsv
from anta.result_manager import ResultManager
from anta.tools import convert_categories


class TestReportCsv:
Expand All @@ -25,7 +26,7 @@ def compare_csv_and_result(self, rows: list[Any], index: int, result_manager: Re
assert rows[index + 1][2] == result_manager.results[index].result
assert rows[index + 1][3] == ReportCsv().split_list_to_txt_list(result_manager.results[index].messages)
assert rows[index + 1][4] == result_manager.results[index].description
assert rows[index + 1][5] == ReportCsv().split_list_to_txt_list(result_manager.results[index].categories)
assert rows[index + 1][5] == ReportCsv().split_list_to_txt_list(convert_categories(result_manager.results[index].categories))

def test_report_csv_generate(
self,
Expand Down
8 changes: 4 additions & 4 deletions tests/units/result_manager/test__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,12 +85,12 @@ def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[T

result_manager.results = results

# Check the current categories order and name format
expected_order = ["OSPF", "BGP", "VXLAN", "System"]
# Check the current categories order
expected_order = ["ospf", "bgp", "vxlan", "system"]
assert list(result_manager.category_stats.keys()) == expected_order

# Check the sorted categories order and name format
expected_order = ["BGP", "OSPF", "System", "VXLAN"]
# Check the sorted categories order
expected_order = ["bgp", "ospf", "system", "vxlan"]
assert list(result_manager.sorted_category_stats.keys()) == expected_order

@pytest.mark.parametrize(
Expand Down
16 changes: 15 additions & 1 deletion tests/units/test_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

import pytest

from anta.tools import custom_division, get_dict_superset, get_failed_logs, get_item, get_value
from anta.tools import convert_categories, custom_division, get_dict_superset, get_failed_logs, get_item, get_value

TEST_GET_FAILED_LOGS_DATA = [
{"id": 1, "name": "Alice", "age": 30, "email": "[email protected]"},
Expand Down Expand Up @@ -499,3 +499,17 @@ def test_get_item(
def test_custom_division(numerator: float, denominator: float, expected_result: str) -> None:
"""Test custom_division."""
assert custom_division(numerator, denominator) == expected_result


@pytest.mark.parametrize(
    ("test_input", "expected_raise", "expected_result"),
    [
        # Empty input is valid and yields an empty list.
        pytest.param([], does_not_raise(), [], id="empty list"),
        # Acronyms (bgp, vlan) are upper-cased; other words are title-cased.
        pytest.param(["bgp", "system", "vlan", "configuration"], does_not_raise(), ["BGP", "System", "VLAN", "Configuration"], id="list with acronyms and titles"),
        # Non-list input must raise TypeError; expected_result is unused in this case.
        pytest.param(42, pytest.raises(TypeError, match="Wrong input type"), None, id="wrong input type"),
    ],
)
def test_convert_categories(test_input: list[str], expected_raise: AbstractContextManager[Exception], expected_result: list[str]) -> None:
    """Test convert_categories."""
    # The context manager either expects no exception or asserts the raised TypeError.
    with expected_raise:
        assert convert_categories(test_input) == expected_result

0 comments on commit 6d59aa9

Please sign in to comment.