Add run_command benchcomp visualization
This allows users to write their own custom visualization script to run
after running the benchmarks. Prior to this commit, visualizations had
to be checked into the Kani repository.

When `run_command` is specified as a visualization, benchcomp runs the
specified command and passes the result of the run to it as JSON on
stdin. The command can then process the result however it likes.

This resolves #2518.
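
For illustration, a user-supplied visualization could be a small script like
the following sketch. The script name matches the sample configuration in the
docstring below; the output format is hypothetical.

```
#!/usr/bin/env python3
# my_visualization.py -- hypothetical custom visualization. benchcomp
# pipes the run result to this script as JSON on stdin.

import json
import sys


def main():
    result = json.load(sys.stdin)
    # The top-level keys "benchmarks" and "metrics" are part of the
    # result format; what lies beneath them depends on the parser used.
    for name in sorted(result["benchmarks"]):
        print(f"benchmark: {name}")


if __name__ == "__main__":
    main()
```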
karkhaz committed Jun 20, 2023
1 parent 49405b2 commit b9792ec
Showing 3 changed files with 141 additions and 3 deletions.
1 change: 0 additions & 1 deletion docs/src/SUMMARY.md
@@ -35,7 +35,6 @@
- [Performance comparisons](./performance-comparisons.md)
  - [`benchcomp` command line](./benchcomp-cli.md)
  - [`benchcomp` configuration file](./benchcomp-conf.md)
  - [Custom visualizations](./benchcomp-viz.md)
  - [Custom parsers](./benchcomp-parse.md)

- [Limitations](./limitations.md)
40 changes: 38 additions & 2 deletions tools/benchcomp/benchcomp/visualizers/__init__.py
@@ -3,6 +3,8 @@


import dataclasses
import json
import logging  # used by __call__'s error handling below
import subprocess
import textwrap

import jinja2
@@ -12,8 +14,42 @@
import benchcomp.visualizers.utils as viz_utils


# TODO The doc comment should appear in the help output, which should list all
# available checks.

@dataclasses.dataclass
class run_command:
    """Run an executable command, passing the result as JSON on stdin

    This allows you to write your own visualization, which reads the result
    on stdin and does something with it, e.g. writing out a graph or other
    output file.

    Sample configuration:

    ```
    visualize:
    - type: run_command
      command: ./my_visualization.py
    ```
    """

    command: str

    def __call__(self, results):
        results = json.dumps(results, indent=2)
        try:
            proc = subprocess.Popen(
                self.command, shell=True, text=True, stdin=subprocess.PIPE)
            proc.communicate(input=results)
            # Popen.communicate() does not raise CalledProcessError on a
            # nonzero exit code, so check the return code explicitly.
            if proc.returncode:
                logging.warning(
                    "visualization command '%s' exited with code %d",
                    self.command, proc.returncode)
        except (OSError, subprocess.SubprocessError) as exc:
            logging.error(
                "visualization command '%s' failed: %s", self.command, str(exc))


@dataclasses.dataclass
class error_on_regression:
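
As a hedged sketch of how `run_command` behaves in isolation, outside
benchcomp's normal driver: the inline `python3 -c` command below is
illustrative, and the results dict mirrors the minimal shape the tests
further down construct. It assumes the `benchcomp` package is on
`PYTHONPATH`.

```
# Sketch: invoking the visualization directly with a results dict.
from benchcomp.visualizers import run_command

viz = run_command(
    command="python3 -c 'import sys, json; "
            "print(sorted(json.load(sys.stdin)))'")
viz({"benchmarks": {}, "metrics": {}})  # prints ['benchmarks', 'metrics']
```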
103 changes: 103 additions & 0 deletions tools/benchcomp/test/test_regression.py
@@ -10,6 +10,7 @@
import tempfile
import textwrap
import unittest
import uuid

import yaml

@@ -737,3 +738,105 @@ def test_command_parser(self):

        for item in ["benchmarks", "metrics"]:
            self.assertIn(item, result)


    def test_run_command_visualization(self):
        """Ensure that the run_command visualization can execute a command"""

        with tempfile.TemporaryDirectory() as tmp:
            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
            run_bc = Benchcomp({
                "variants": {
                    "v1": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    },
                    "v2": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    }
                },
                "run": {
                    "suites": {
                        "suite_1": {
                            "parser": {
                                "command": """
                                    echo '{
                                        "benchmarks": {},
                                        "metrics": {}
                                    }'
                                """
                            },
                            "variants": ["v2", "v1"]
                        }
                    }
                },
                "visualize": [{
                    "type": "run_command",
                    "command": f"cat - > {out_file}"
                }],
            })
            run_bc()
            self.assertEqual(
                run_bc.proc.returncode, 0, msg=run_bc.stderr)

            with open(out_file) as handle:
                result = yaml.safe_load(handle)

            for item in ["benchmarks", "metrics"]:
                self.assertIn(item, result)


    def test_run_failing_command_visualization(self):
        """Ensure benchcomp terminates normally even when the run_command visualization fails"""

        with tempfile.TemporaryDirectory() as tmp:
            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
            run_bc = Benchcomp({
                "variants": {
                    "v1": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    },
                    "v2": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    }
                },
                "run": {
                    "suites": {
                        "suite_1": {
                            "parser": {
                                "command": """
                                    echo '{
                                        "benchmarks": {},
                                        "metrics": {}
                                    }'
                                """
                            },
                            "variants": ["v2", "v1"]
                        }
                    }
                },
                "visualize": [{
                    "type": "run_command",
                    "command": f"cat - > {out_file}; false"
                }],
            })
            run_bc()
            self.assertEqual(
                run_bc.proc.returncode, 0, msg=run_bc.stderr)

            with open(out_file) as handle:
                result = yaml.safe_load(handle)

            for item in ["benchmarks", "metrics"]:
                self.assertIn(item, result)
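
A side note on the assertions in both tests: the visualization writes JSON,
but the tests read it back with `yaml.safe_load`. This works because valid
JSON is also valid YAML, e.g.:

```
import yaml

# JSON emitted by the visualization parses as YAML.
assert yaml.safe_load('{"benchmarks": {}, "metrics": {}}') == {
    "benchmarks": {}, "metrics": {}}
```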
