Add run_command benchcomp visualization (#2542)
This allows users to write their own custom visualization scripts to run
after the benchmarks have finished. Prior to this commit, visualizations
had to be checked into the Kani repository.

When run_command is specified as a visualization, benchcomp runs the
specified command and passes the result of the run to it as JSON on
stdin. The command can then process the result however it likes.

This resolves #2518.
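For illustration, here is a minimal sketch of what such a user-supplied script might look like. The filename `my_visualization.py` matches the sample configuration in the docstring below; the summary it prints is hypothetical, since the only interface benchcomp defines is the JSON result arriving on stdin.

```
#!/usr/bin/env python3
# my_visualization.py: hypothetical user-supplied visualization script.
# benchcomp's run_command visualization pipes the result of the run to
# this process as JSON on stdin.

import json
import sys


def main():
    result = json.load(sys.stdin)

    # The result contains "benchmarks" and "metrics" keys, as exercised
    # by the regression tests added in this commit.
    print("metrics recorded:", ", ".join(result["metrics"]))
    print("benchmarks run:", len(result["benchmarks"]))


if __name__ == "__main__":
    main()
```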
karkhaz authored Jun 26, 2023
1 parent 987c9ce commit d4a624f
Showing 4 changed files with 138 additions and 4 deletions.
1 change: 0 additions & 1 deletion docs/src/SUMMARY.md
@@ -35,7 +35,6 @@
 - [Performance comparisons](./performance-comparisons.md)
   - [`benchcomp` command line](./benchcomp-cli.md)
   - [`benchcomp` configuration file](./benchcomp-conf.md)
-  - [Custom visualizations](./benchcomp-viz.md)
   - [Custom parsers](./benchcomp-parse.md)
 
 - [Limitations](./limitations.md)
2 changes: 1 addition & 1 deletion docs/src/benchcomp-conf.md
@@ -1,7 +1,7 @@
 # `benchcomp` configuration file
 
 `benchcomp`'s operation is controlled through a YAML file---`benchcomp.yaml` by default or a file passed to the `-c/--config` option.
-This page describes the file's schema and lists the different parsers and visualizations that are available.
+This page lists the different visualizations that are available.
 
 
 ## Built-in visualizations
42 changes: 40 additions & 2 deletions tools/benchcomp/benchcomp/visualizers/__init__.py
@@ -3,6 +3,8 @@


import dataclasses
import json
import logging
import subprocess
import textwrap

import jinja2
@@ -12,8 +14,44 @@
import benchcomp.visualizers.utils as viz_utils


# TODO The doc comment should appear in the help output, which should list all
# available checks.

@dataclasses.dataclass
class run_command:
"""Run an executable command, passing the performance metrics as JSON on stdin.
This allows you to write your own visualization, which reads a result file
on stdin and does something with it, e.g. writing out a graph or other
output file.
Sample configuration:
```
visualize:
- type: run_command
command: ./my_visualization.py
```
"""

command: str


    def __call__(self, results):
        # Serialize the performance metrics and pipe them to the command
        results = json.dumps(results, indent=2)
        try:
            proc = subprocess.Popen(
                self.command, shell=True, text=True, stdin=subprocess.PIPE)
            _, _ = proc.communicate(input=results)
        except (OSError, subprocess.SubprocessError) as exc:
            logging.error(
                "visualization command '%s' failed: %s", self.command, str(exc))
            viz_utils.EXIT_CODE = 1
            return
        if proc.returncode:
            logging.error(
                "visualization command '%s' exited with code %d",
                self.command, proc.returncode)
            viz_utils.EXIT_CODE = 1



@dataclasses.dataclass
class error_on_regression:
97 changes: 97 additions & 0 deletions tools/benchcomp/test/test_regression.py
@@ -10,6 +10,7 @@
import tempfile
import textwrap
import unittest
import uuid

import yaml

@@ -737,3 +738,99 @@ def test_command_parser(self):

for item in ["benchmarks", "metrics"]:
self.assertIn(item, result)


def test_run_command_visualization(self):
"""Ensure that the run_command visualization can execute a command"""

with tempfile.TemporaryDirectory() as tmp:
out_file = pathlib.Path(tmp) / str(uuid.uuid4())
run_bc = Benchcomp({
"variants": {
"v1": {
"config": {
"command_line": "true",
"directory": tmp,
}
},
"v2": {
"config": {
"command_line": "true",
"directory": tmp,
}
}
},
"run": {
"suites": {
"suite_1": {
"parser": {
"command": """
echo '{
"benchmarks": {},
"metrics": {}
}'
"""
},
"variants": ["v2", "v1"]
}
}
},
"visualize": [{
"type": "run_command",
"command": f"cat - > {out_file}"
}],
})
run_bc()
self.assertEqual(
run_bc.proc.returncode, 0, msg=run_bc.stderr)

with open(out_file) as handle:
result = yaml.safe_load(handle)

for item in ["benchmarks", "metrics"]:
self.assertIn(item, result)


def test_run_failing_command_visualization(self):
"""Ensure that benchcomp terminates with a non-zero return code when run_command visualization fails"""

with tempfile.TemporaryDirectory() as tmp:
out_file = pathlib.Path(tmp) / str(uuid.uuid4())
run_bc = Benchcomp({
"variants": {
"v1": {
"config": {
"command_line": "true",
"directory": tmp,
}
},
"v2": {
"config": {
"command_line": "true",
"directory": tmp,
}
}
},
"run": {
"suites": {
"suite_1": {
"parser": {
"command": """
echo '{
"benchmarks": {},
"metrics": {}
}'
"""
},
"variants": ["v2", "v1"]
}
}
},
"visualize": [{
"type": "run_command",
"command": f"cat - > {out_file}; false"
}],
})
run_bc()
self.assertNotEqual(
run_bc.proc.returncode, 0, msg=run_bc.stderr)
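These tests drive the full benchcomp pipeline. To smoke-test a visualization script by hand, outside of benchcomp, you can pipe a minimal result to it the same way run_command does. A sketch, assuming the hypothetical `my_visualization.py` script from the docstring sample exists and is executable:

```
import json
import subprocess

# Minimal stand-in for a benchcomp result, with the same keys the
# regression tests above check for.
result = {"benchmarks": {}, "metrics": {}}

# Pipe the JSON to the script on stdin, mirroring run_command.__call__.
proc = subprocess.Popen(
    "./my_visualization.py", shell=True, text=True, stdin=subprocess.PIPE)
proc.communicate(input=json.dumps(result, indent=2))
print("visualization exited with code", proc.returncode)
```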
