test(anta): Add benchmarks for reporters (#971)
mtache authored Dec 18, 2024
1 parent 26bf5fa commit 45a62d7
Showing 10 changed files with 176 additions and 50 deletions.
2 changes: 1 addition & 1 deletion anta/device.py
@@ -255,7 +255,7 @@ class AsyncEOSDevice(AntaDevice):
"""

def __init__(
def __init__( # noqa: PLR0913
self,
host: str,
username: str,
12 changes: 11 additions & 1 deletion anta/result_manager/__init__.py
@@ -9,6 +9,7 @@
from collections import defaultdict
from functools import cached_property
from itertools import chain
from typing import Any

from anta.result_manager.models import AntaTestStatus, TestResult

@@ -89,6 +90,10 @@ def __init__(self) -> None:
If the status of the added test is error, the status is untouched and the
error_status is set to True.
"""
self.reset()

def reset(self) -> None:
"""Create or reset the attributes of the ResultManager instance."""
self._result_entries: list[TestResult] = []
self.status: AntaTestStatus = AntaTestStatus.UNSET
self.error_status = False
@@ -122,10 +127,15 @@ def results(self, value: list[TestResult]) -> None:
for result in value:
self.add(result)

@property
def dump(self) -> list[dict[str, Any]]:
"""Get a list of dictionary of the results."""
return [result.model_dump() for result in self._result_entries]

@property
def json(self) -> str:
"""Get a JSON representation of the results."""
return json.dumps([result.model_dump() for result in self._result_entries], indent=4)
return json.dumps(self.dump, indent=4)

@property
def sorted_category_stats(self) -> dict[str, CategoryStats]:
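The new reset() method and dump property give ResultManager a reusable lifecycle: results can be serialized and then cleared so the same instance can be fed into another run. A minimal sketch of that usage, assuming only the API visible in the diff above (results are recorded during a run with the existing add() call):

from anta.result_manager import ResultManager

manager = ResultManager()
# ... during a run, results are recorded with manager.add(result) ...
as_dicts = manager.dump   # list of dicts, one model_dump() per TestResult
as_json = manager.json    # same data rendered with json.dumps(..., indent=4)
manager.reset()           # clear results so the instance can be reused by another run
assert manager.dump == []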
2 changes: 1 addition & 1 deletion anta/runner.py
@@ -211,7 +211,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio


@cprofile()
async def main( # noqa: PLR0913
async def main(
manager: ResultManager,
inventory: AntaInventory,
catalog: AntaCatalog,
4 changes: 2 additions & 2 deletions asynceapi/device.py
@@ -121,7 +121,7 @@ async def check_connection(self) -> bool:
"""
return await port_check_url(self.base_url)

async def cli( # noqa: PLR0913
async def cli(
self,
command: str | dict[str, Any] | None = None,
commands: Sequence[str | dict[str, Any]] | None = None,
@@ -195,7 +195,7 @@ async def cli( # noqa: PLR0913
return None
raise

def _jsonrpc_command( # noqa: PLR0913
def _jsonrpc_command(
self,
commands: Sequence[str | dict[str, Any]] | None = None,
ofmt: str | None = None,
13 changes: 4 additions & 9 deletions pyproject.toml
@@ -367,6 +367,7 @@ convention = "numpy"
# we have not removed pylint completely, these settings should be kept in sync with our pylintrc file.
# https://github.com/astral-sh/ruff/issues/970
max-branches = 13
max-args = 10

[tool.ruff.lint.mccabe]
# Unlike Flake8, default to a complexity level of 10.
@@ -377,6 +378,7 @@ max-complexity = 10
"RICH_COLOR_PALETTE"
]


[tool.ruff.lint.flake8-type-checking]
# These classes require that type annotations be available at runtime
runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.Input"]
@@ -390,7 +392,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
"tests/units/*" = [
"ARG002", # Sometimes we need to declare unused arguments when a parameter is not used but declared in @pytest.mark.parametrize
"FBT001", # Boolean-typed positional argument in function definition
"PLR0913", # Too many arguments to function call
"PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
"S105", # Passwords are indeed hardcoded in tests
"S106", # Passwords are indeed hardcoded in tests
@@ -412,7 +413,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
"T201", # Allow print statements
]
"anta/cli/*" = [
"PLR0913", # Allow more than 5 input arguments in CLI functions
"PLR0913", # CLI has many arguments defined in functions
"ANN401", # TODO: Check if we can update the Any type hints in the CLI
]
"anta/tests/field_notices.py" = [
@@ -429,13 +430,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
]
"anta/tools.py" = [
"ANN401", # Ok to use Any type hint in our custom get functions
"PLR0913", # Ok to have more than 5 arguments in our custom get functions
]
"anta/device.py" = [
"PLR0913", # Ok to have more than 5 arguments in the AntaDevice classes
]
"anta/inventory/__init__.py" = [
"PLR0913", # Ok to have more than 5 arguments in the AntaInventory class
]
"examples/*.py" = [ # These are example scripts and linked in snippets
"S105", # Possible hardcoded password
@@ -470,6 +464,7 @@ disable = [ # Any rule listed here can be disabled: https://github.com/astral-sh
"reimported",
"wrong-import-order",
"wrong-import-position",
"unnecessary-lambda",
"abstract-class-instantiated", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-instantiation-of-abstract-classes-abstract
"unexpected-keyword-arg", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg and other rules
"no-value-for-parameter" # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg
35 changes: 31 additions & 4 deletions tests/benchmark/conftest.py
@@ -4,19 +4,27 @@
"""Fixtures for benchmarking ANTA."""

import logging
from collections import defaultdict

import pytest
import respx
from _pytest.terminal import TerminalReporter

from anta.catalog import AntaCatalog
from anta.result_manager import ResultManager

from .utils import AntaMockEnvironment

logger = logging.getLogger(__name__)

TEST_CASE_COUNT = None

# Used to globally configure the benchmarks by specifying parameters for inventories
BENCHMARK_PARAMETERS = [
pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
]


@pytest.fixture(name="anta_mock_env", scope="session") # We want this fixture to have a scope set to session to avoid reparsing all the unit tests data.
def anta_mock_env_fixture() -> AntaMockEnvironment:
@@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
return anta_mock_env.catalog


@pytest.fixture(name="session_results", scope="session") # We want this fixture to be reused across test modules within tests/benchmark
def session_results_fixture() -> defaultdict[str, ResultManager]:
"""Return a dictionary of ResultManger objects for the benchmarks.
The key is the test id as defined in the pytest_generate_tests in this module.
Used to pass a populated ResultManager from one benchmark to another.
"""
return defaultdict(lambda: ResultManager())


@pytest.fixture
def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager:
"""Return the unique ResultManger object for the current benchmark parameter."""
return session_results[request.node.callspec.id]


def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
"""Display the total number of ANTA unit test cases used to benchmark."""
terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
@@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
return
metafunc.parametrize(
"inventory",
[
pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
],
BENCHMARK_PARAMETERS,
indirect=True,
)
elif "results" in metafunc.fixturenames:
metafunc.parametrize(
"results",
BENCHMARK_PARAMETERS,
indirect=True,
)
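The session_results fixture is a session-scoped defaultdict keyed by the parametrization id ("1-device", "2-devices"), so a reporter benchmark parametrized with a given id receives the very ResultManager that the run benchmark with the same id populated. A small sketch of the mapping behaviour this relies on, illustrative only and not part of the commit:

from collections import defaultdict
from anta.result_manager import ResultManager

session_results: defaultdict[str, ResultManager] = defaultdict(ResultManager)
first = session_results["1-device"]          # created lazily on first access
assert session_results["1-device"] is first  # later lookups return the same instance

Note that defaultdict(ResultManager) is equivalent to the defaultdict(lambda: ResultManager()) used in the fixture; the lambda form is presumably why "unnecessary-lambda" was added to the pylint disable list in pyproject.toml.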
71 changes: 44 additions & 27 deletions tests/benchmark/test_anta.py
@@ -5,6 +5,7 @@

import asyncio
import logging
from collections import defaultdict
from unittest.mock import patch

import pytest
@@ -22,45 +23,61 @@
logger = logging.getLogger(__name__)


def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
def test_anta_dry_run(
benchmark: BenchmarkFixture,
event_loop: asyncio.AbstractEventLoop,
catalog: AntaCatalog,
inventory: AntaInventory,
request: pytest.FixtureRequest,
session_results: defaultdict[str, ResultManager],
) -> None:
"""Benchmark ANTA in Dry-Run Mode."""
# Disable logging during ANTA execution so logging time is not counted in the benchmarks
logging.disable()

def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
return manager
results = session_results[request.node.callspec.id]

manager = benchmark(_)
@benchmark
def _() -> None:
results.reset()
catalog.clear_indexes()
event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True))

logging.disable(logging.NOTSET)
if len(manager.results) != len(inventory) * len(catalog.tests):
pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False)
bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------"

if len(results.results) != len(inventory) * len(catalog.tests):
pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(results.results)}", pytrace=False)
bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------"
logger.info(bench_info)


@patch("anta.models.AntaTest.collect", collect)
@patch("anta.device.AntaDevice.collect_commands", collect_commands)
@pytest.mark.dependency(name="anta_benchmark", scope="package")
@respx.mock # Mock eAPI responses
def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
def test_anta(
benchmark: BenchmarkFixture,
event_loop: asyncio.AbstractEventLoop,
catalog: AntaCatalog,
inventory: AntaInventory,
request: pytest.FixtureRequest,
session_results: defaultdict[str, ResultManager],
) -> None:
"""Benchmark ANTA."""
# Disable logging during ANTA execution so logging time is not counted in the benchmarks
logging.disable()

def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
event_loop.run_until_complete(main(manager, inventory, catalog))
return manager
results = session_results[request.node.callspec.id]

manager = benchmark(_)
@benchmark
def _() -> None:
results.reset()
catalog.clear_indexes()
event_loop.run_until_complete(main(results, inventory, catalog))

logging.disable(logging.NOTSET)

if len(catalog.tests) * len(inventory) != len(manager.results):
if len(catalog.tests) * len(inventory) != len(results.results):
# This could mean duplicates exist.
# TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list
seen = set()
@@ -74,17 +91,17 @@ def _() -> ResultManager:
for test in dupes:
msg = f"Found duplicate in test catalog: {test}"
logger.error(msg)
pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False)
pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False)
bench_info = (
"\n--- ANTA NRFU Benchmark Information ---\n"
f"Test results: {len(manager.results)}\n"
f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n"
f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n"
f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n"
f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n"
f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n"
f"Test results: {len(results.results)}\n"
f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n"
f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n"
f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n"
f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n"
f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n"
"---------------------------------------"
)
logger.info(bench_info)
assert manager.get_total_results({AntaTestStatus.ERROR}) == 0
assert manager.get_total_results({AntaTestStatus.UNSET}) == 0
assert results.get_total_results({AntaTestStatus.ERROR}) == 0
assert results.get_total_results({AntaTestStatus.UNSET}) == 0
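The benchmarks no longer build a fresh ResultManager inside the measured closure; instead they decorate a local function with @benchmark, mutate the shared ResultManager captured from the enclosing scope, and call results.reset() each round so repeated invocations stay idempotent. The populated manager is then left in session_results for the reporter benchmarks. A generic sketch of that decorator pattern, assuming only that the benchmark fixture is callable (both pytest-benchmark and pytest-codspeed expose one with this behaviour); the test name and list are hypothetical:

# Illustrative sketch, not part of the commit.
def test_something(benchmark) -> None:
    collected: list[int] = []

    @benchmark
    def _() -> None:
        collected.clear()            # keep repeated rounds idempotent, like results.reset()
        collected.extend(range(10))  # side effects replace the return value of the old closure

    assert len(collected) == 10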
71 changes: 71 additions & 0 deletions tests/benchmark/test_reporter.py
@@ -0,0 +1,71 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for anta.reporter."""

import json
import logging
from pathlib import Path

import pytest

from anta.reporter import ReportJinja, ReportTable
from anta.reporter.csv_reporter import ReportCsv
from anta.reporter.md_reporter import MDReportGenerator
from anta.result_manager import ResultManager

logger = logging.getLogger(__name__)

DATA_DIR: Path = Path(__file__).parents[1].resolve() / "data"


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_table_all(results: ResultManager) -> None:
"""Benchmark ReportTable.report_all()."""
reporter = ReportTable()
reporter.report_all(results)


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_table_devices(results: ResultManager) -> None:
"""Benchmark ReportTable.report_summary_devices()."""
reporter = ReportTable()
reporter.report_summary_devices(results)


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_table_tests(results: ResultManager) -> None:
"""Benchmark ReportTable.report_summary_tests()."""
reporter = ReportTable()
reporter.report_summary_tests(results)


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_json(results: ResultManager) -> None:
"""Benchmark JSON report."""
assert isinstance(results.json, str)


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_jinja(results: ResultManager) -> None:
"""Benchmark ReportJinja."""
assert isinstance(ReportJinja(template_path=DATA_DIR / "template.j2").render(json.loads(results.json)), str)


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_csv(results: ResultManager, tmp_path: Path) -> None:
"""Benchmark ReportCsv.generate()."""
ReportCsv.generate(results=results, csv_filename=tmp_path / "report.csv")


@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_markdown(results: ResultManager, tmp_path: Path) -> None:
"""Benchmark MDReportGenerator.generate()."""
MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md")
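Each reporter benchmark declares a pytest-dependency on "anta_benchmark", so it only runs once the shared ResultManager has been populated by test_anta, and each one exercises a single reporter against that manager. A sketch of driving the same reporters outside the benchmark suite, using only the calls that appear in the diff; the helper name and output directory are illustrative:

from pathlib import Path

from anta.reporter import ReportTable
from anta.reporter.csv_reporter import ReportCsv
from anta.reporter.md_reporter import MDReportGenerator
from anta.result_manager import ResultManager

def write_reports(results: ResultManager, out_dir: Path) -> None:
    """Illustrative helper (not part of the commit) mirroring the benchmarked calls."""
    ReportTable().report_all(results)                                         # build the full results table
    ReportCsv.generate(results=results, csv_filename=out_dir / "report.csv")  # CSV report
    MDReportGenerator.generate(results=results, md_filename=out_dir / "report.md")  # Markdown report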