diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md
index 8ff6608445a9..d4609a87c23b 100644
--- a/docs/src/SUMMARY.md
+++ b/docs/src/SUMMARY.md
@@ -35,7 +35,6 @@
   - [Performance comparisons](./performance-comparisons.md)
     - [`benchcomp` command line](./benchcomp-cli.md)
     - [`benchcomp` configuration file](./benchcomp-conf.md)
-    - [Custom visualizations](./benchcomp-viz.md)
     - [Custom parsers](./benchcomp-parse.md)
 
 - [Limitations](./limitations.md)
diff --git a/docs/src/benchcomp-conf.md b/docs/src/benchcomp-conf.md
index 489ae34a5550..77236d0917bf 100644
--- a/docs/src/benchcomp-conf.md
+++ b/docs/src/benchcomp-conf.md
@@ -1,7 +1,7 @@
 # `benchcomp` configuration file
 
 `benchcomp`'s operation is controlled through a YAML file---`benchcomp.yaml` by default or a file passed to the `-c/--config` option.
-This page describes the file's schema and lists the different parsers and visualizations that are available.
+This page lists the different visualizations that are available.
 
 ## Built-in visualizations
 
diff --git a/tools/benchcomp/benchcomp/visualizers/__init__.py b/tools/benchcomp/benchcomp/visualizers/__init__.py
index da9effcba426..af973379f19d 100644
--- a/tools/benchcomp/benchcomp/visualizers/__init__.py
+++ b/tools/benchcomp/benchcomp/visualizers/__init__.py
@@ -3,6 +3,8 @@
 
 
 import dataclasses
+import json
+import subprocess
 import textwrap
 
 import jinja2
@@ -12,8 +14,44 @@
 import benchcomp.visualizers.utils as viz_utils
 
 
-# TODO The doc comment should appear in the help output, which should list all
-# available checks.
+
+@dataclasses.dataclass
+class run_command:
+    """Run an executable command, passing the performance metrics as JSON on stdin.
+
+    This allows you to write your own visualization, which reads a result file
+    on stdin and does something with it, e.g. writing out a graph or other
+    output file.
+
+    Sample configuration:
+
+    ```
+    visualize:
+    - type: run_command
+      command: ./my_visualization.py
+    ```
+    """
+
+    command: str
+
+
+    def __call__(self, results):
+        results = json.dumps(results, indent=2)
+        try:
+            proc = subprocess.Popen(
+                self.command, shell=True, text=True, stdin=subprocess.PIPE)
+            _, _ = proc.communicate(input=results)
+        except (OSError, subprocess.SubprocessError) as exe:
+            logging.error(
+                "visualization command '%s' failed: %s", self.command, str(exe))
+            viz_utils.EXIT_CODE = 1
+        if proc.returncode:
+            logging.error(
+                "visualization command '%s' exited with code %d",
+                self.command, proc.returncode)
+            viz_utils.EXIT_CODE = 1
+
+
 
 @dataclasses.dataclass
 class error_on_regression:
diff --git a/tools/benchcomp/test/test_regression.py b/tools/benchcomp/test/test_regression.py
index a5b02a5b09b1..c5cb61ae8190 100644
--- a/tools/benchcomp/test/test_regression.py
+++ b/tools/benchcomp/test/test_regression.py
@@ -10,6 +10,7 @@
 import tempfile
 import textwrap
 import unittest
+import uuid
 
 import yaml
 
@@ -737,3 +738,99 @@ def test_command_parser(self):
 
             for item in ["benchmarks", "metrics"]:
                 self.assertIn(item, result)
+
+
+    def test_run_command_visualization(self):
+        """Ensure that the run_command visualization can execute a command"""
+
+        with tempfile.TemporaryDirectory() as tmp:
+            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
+            run_bc = Benchcomp({
+                "variants": {
+                    "v1": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    },
+                    "v2": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    }
+                },
+                "run": {
+                    "suites": {
+                        "suite_1": {
+                            "parser": {
+                                "command": """
+                                    echo '{
+                                        "benchmarks": {},
+                                        "metrics": {}
+                                    }'
+                                """
+                            },
+                            "variants": ["v2", "v1"]
+                        }
+                    }
+                },
+                "visualize": [{
+                    "type": "run_command",
+                    "command": f"cat - > {out_file}"
+                }],
+            })
+            run_bc()
+            self.assertEqual(
+                run_bc.proc.returncode, 0, msg=run_bc.stderr)
+
+            with open(out_file) as handle:
+                result = yaml.safe_load(handle)
+
+            for item in ["benchmarks", "metrics"]:
+                self.assertIn(item, result)
+
+
+    def test_run_failing_command_visualization(self):
+        """Ensure that benchcomp terminates with a non-zero return code when run_command visualization fails"""
+
+        with tempfile.TemporaryDirectory() as tmp:
+            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
+            run_bc = Benchcomp({
+                "variants": {
+                    "v1": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    },
+                    "v2": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    }
+                },
+                "run": {
+                    "suites": {
+                        "suite_1": {
+                            "parser": {
+                                "command": """
+                                    echo '{
+                                        "benchmarks": {},
+                                        "metrics": {}
+                                    }'
+                                """
+                            },
+                            "variants": ["v2", "v1"]
+                        }
+                    }
+                },
+                "visualize": [{
+                    "type": "run_command",
+                    "command": f"cat - > {out_file}; false"
+                }],
+            })
+            run_bc()
+            self.assertNotEqual(
+                run_bc.proc.returncode, 0, msg=run_bc.stderr)
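For context, the `run_command` visualizer added above pipes the merged result file to the configured command as JSON on stdin. Below is a minimal sketch of what a consumer script could look like: a hypothetical stand-in for the `./my_visualization.py` referenced in the docstring's sample configuration, with an illustrative `summary.txt` output name. It assumes only what the regression tests above guarantee, namely top-level `benchmarks` and `metrics` keys in the result.

```python
#!/usr/bin/env python3
# Hypothetical run_command consumer: reads the result JSON that benchcomp
# pipes to this process's stdin and writes a small plain-text summary.

import json
import sys


def main() -> None:
    # benchcomp serializes the result file with json.dumps() and writes it
    # to the visualization command's stdin.
    results = json.load(sys.stdin)

    benchmarks = results.get("benchmarks", {})
    metrics = results.get("metrics", {})

    # Illustrative output; a real visualization could emit a graph, a table,
    # or any other artifact derived from the results.
    with open("summary.txt", "w") as handle:
        handle.write(f"number of benchmarks: {len(benchmarks)}\n")
        handle.write(f"number of metrics: {len(metrics)}\n")


if __name__ == "__main__":
    main()
```

Made executable and referenced as `command: ./my_visualization.py` in the `visualize` section, such a script runs after the results are merged; if it exits non-zero, `run_command` sets the exit code so that benchcomp itself terminates non-zero, which is what `test_run_failing_command_visualization` exercises.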