Don't crash when benchmark contains unknown metric #2985

Merged
10 changes: 9 additions & 1 deletion tools/benchcomp/benchcomp/visualizers/__init__.py
@@ -6,6 +6,7 @@
import json
import logging
import subprocess
import sys
import textwrap

import jinja2
@@ -232,12 +233,19 @@ def _organize_results_into_metrics(results):
    for bench, bench_result in results["benchmarks"].items():
        for variant, variant_result in bench_result["variants"].items():
            for metric, value in variant_result["metrics"].items():
                if metric not in ret:
                    ret[metric] = {}
                    logging.warning(
                        "Benchmark '%s' contained a metric '%s' in the "
                        "'%s' variant result that was not declared in "
                        "the 'metrics' dict. Add '%s: {}' to the metrics "
                        "dict", bench, metric, variant, metric)
                try:
                    ret[metric][bench][variant] = variant_result["metrics"][metric]
                except KeyError:
                    ret[metric][bench] = {
                        variant: variant_result["metrics"][metric]
                    }
    return ret


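For context, a minimal standalone sketch of the behavior this hunk introduces: the helper pivots per-benchmark results into per-metric tables, and an undeclared metric is now registered on the fly with a warning instead of crashing with an uncaught KeyError. The sample data is hypothetical, and the initialization of ret from results["metrics"] is an assumption about code outside the hunk.

    import logging

    def organize_results_into_metrics(results):
        # Pivot {bench: {variant: {metric: value}}} into
        # {metric: {bench: {variant: value}}}.
        ret = {metric: {} for metric in results["metrics"]}  # assumed initializer
        for bench, bench_result in results["benchmarks"].items():
            for variant, variant_result in bench_result["variants"].items():
                for metric, value in variant_result["metrics"].items():
                    if metric not in ret:
                        # Before this change, an undeclared metric aborted the run.
                        ret[metric] = {}
                        logging.warning("undeclared metric '%s' in '%s'", metric, bench)
                    ret[metric].setdefault(bench, {})[variant] = value
        return ret

    results = {
        "metrics": {"foo": {}},
        "benchmarks": {"bench_1": {"variants": {"v1": {"metrics": {"baz": 11}}}}},
    }
    print(organize_results_into_metrics(results))
    # {'foo': {}, 'baz': {'bench_1': {'v1': 11}}}, with a warning logged for 'baz'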
68 changes: 68 additions & 0 deletions tools/benchcomp/test/test_regression.py
@@ -6,6 +6,7 @@
# tests.

import pathlib
import re
import subprocess
import tempfile
import textwrap
@@ -834,3 +835,70 @@ def test_run_failing_command_visualization(self):
        run_bc()
        self.assertNotEqual(
            run_bc.proc.returncode, 0, msg=run_bc.stderr)


    def test_unknown_metric_in_benchmark(self):
        """Ensure that benchcomp continues with a warning if a benchmark result contains an unknown metric"""

        with tempfile.TemporaryDirectory() as tmp:
            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
            run_bc = Benchcomp({
                "variants": {
                    "v1": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    },
                    "v2": {
                        "config": {
                            "command_line": "true",
                            "directory": tmp,
                        }
                    }
                },
                "run": {
                    "suites": {
                        "suite_1": {
                            "parser": {
                                "command": """
                                    echo '{
                                        metrics: {
                                            foo: {},
                                            bar: {},
                                        },
                                        benchmarks: {
                                            bench_1: {
                                                metrics: {
                                                    baz: 11
                                                }
                                            }
                                        }
                                    }'
                                """
                            },
                            "variants": ["v2", "v1"]
                        }
                    }
                },
                "visualize": [{
                    "type": "dump_markdown_results_table",
                    "out_file": "-",
                    "extra_columns": {},
                }],
            })

            output_pat = re.compile(
                "Benchmark 'bench_1' contained a metric 'baz' in the 'v1' "
                "variant result that was not declared in the 'metrics' dict.")

            run_bc()
            self.assertRegex(run_bc.stderr, output_pat)

            self.assertEqual(run_bc.proc.returncode, 0, msg=run_bc.stderr)

            with open(run_bc.working_directory / "result.yaml") as handle:
                result = yaml.safe_load(handle)

            for item in ["benchmarks", "metrics"]:
                self.assertIn(item, result)
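An aside on the fixture: the parser command echoes a document in YAML flow style, which is why the unquoted keys parse cleanly. A quick sketch of reading it back, assuming benchcomp loads parser output with yaml.safe_load:

    import yaml

    # Same structure as the echoed fixture, minus trailing commas.
    doc = "{metrics: {foo: {}, bar: {}}, benchmarks: {bench_1: {metrics: {baz: 11}}}}"
    parsed = yaml.safe_load(doc)
    print(parsed["benchmarks"]["bench_1"]["metrics"])  # {'baz': 11}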