diff --git a/.gitattributes b/.gitattributes index d0cab7ecf..66ccad124 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,2 @@ -*.v linguist-detectable=false \ No newline at end of file +openlane/smoke_test_design/* linguist-detectable=false +.github/* linguist-detectable=false \ No newline at end of file diff --git a/.github/actions/build_nix/action.yml b/.github/actions/build_nix/action.yml index 0443e1d29..ba90a68e4 100644 --- a/.github/actions/build_nix/action.yml +++ b/.github/actions/build_nix/action.yml @@ -35,7 +35,7 @@ runs: shell: ${{ inputs.shell }} run: | echo "#################################################################" - nix-shell ${{ inputs.nix_build_args }} --run "pytest --step-rx '.'" + nix-shell ${{ inputs.nix_build_args }} --run "pytest --step-rx '.' -n auto" - name: Smoke Test shell: ${{ inputs.shell }} run: | diff --git a/.github/scripts/compare_main.js b/.github/scripts/compare_main.js new file mode 100644 index 000000000..52b8a99f5 --- /dev/null +++ b/.github/scripts/compare_main.js @@ -0,0 +1,42 @@ +#!/usr/bin/env node +// Copyright 2023 Efabless Corporation +// +// Licensed under the Apache License, Version 2.0(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +/** + * @param {string} verbosity + * @param {string} token + * @returns {string} + */ +function get(verbosity, table_out = null, token = "") { + const { spawnSync } = require("child_process"); + + let tableOutOpts = table_out ? ["--table-out", table_out] : []; + + let child = spawnSync("python3", ["-m", "openlane.common.metrics", "compare-main", "current", "--table-verbosity", verbosity, "--token", token].concat(tableOutOpts), { encoding: "utf8" }); + + let result = ""; + if (child.status != 0) { + throw new Error("Failed to create report: \n\n```\n" + child.stderr + "\n```"); + } else { + result += "Metric comparisons are in beta. Please report bugs under the issues tab.\n---\n"; + result += "> To create this report yourself, grab the metrics artifact from the CI run, extract them, and invoke `python3 -m openlane.common.metrics compare-main `.\n\n" + child.stdout; + } + + return result.trim(); +}; + +module.exports = get; + +if (require.main === module) { + console.log(get("ALL", "table.md", process.argv[2])); +} diff --git a/.github/scripts/post_metrics.js b/.github/scripts/post_metrics.js new file mode 100644 index 000000000..b8b8aff3f --- /dev/null +++ b/.github/scripts/post_metrics.js @@ -0,0 +1,129 @@ +#!/usr/bin/env node +// Copyright 2023 Efabless Corporation +// +// Licensed under the Apache License, Version 2.0(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const compareMain = require("./compare_main.js"); + +/** + * + * @param {Octokit} github + * @param {object} context + * @param {string} botUsername + * @param {string} body + */ +async function postOrUpdateComment(github, context, botUsername, body) { + const METRIC_REPORT_MARK = ""; + + let page = 1; + let allComments = []; + let comments = []; + do { + let response = await github.request("GET /repos/{owner}/{repo}/issues/{issue_number}/comments?page={page}&per_page=100", { + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + page, + }); + comments = response.data; + allComments = allComments.concat(comments); + page += 1; + } while (comments.length != 0); + + let found = null; + for (let comment of allComments) { + if (comment.body.includes(METRIC_REPORT_MARK) && comment.user.login == botUsername) { + found = comment; + break; + } + } + + let fn = github.rest.issues.createComment; + let request = { + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `${METRIC_REPORT_MARK}\n\n${body}` + }; + + if (found) { + request.comment_id = found.id; + fn = github.rest.issues.updateComment; + } + + await fn(request); +} + +/** + * + * @param {Octokit} github + * @param {object} context + * @param {string} botUsername + * @param {string} botToken + */ +async function main(github, context, botUsername, botToken) { + const fs = require("fs"); + + let body; + try { + body = compareMain("ALL", "./tables_all.md", botToken); + let tables = fs.readFileSync("./tables_all.md", { encoding: "utf8" }); + + let gistResponse = await github.rest.gists.create({ + public: true, + description: `Results for ${context.repo.owner} / ${context.repo.repo}#${context.issue.number} (Run ${context.runId})`, + files: { + "10-ALL.md": { + content: tables + } + } + }); + + body += `\n\nFull tables ► ${gistResponse.data.html_url}\n`; + } catch (e) { + body = e.message; + console.error(e.message) + } + + await postOrUpdateComment(github, context, botUsername, body) +} + +module.exports = main; + +if (require.main === module) { + // Test + try { + require("@octokit/rest"); + } catch (error) { + console.error("Run 'yarn add @octokit/rest @octokit/plugin-paginate-rest'") + process.exit(-1); + } + const { Octokit } = require("@octokit/rest"); + + const context = { + repo: { + owner: "efabless", + repo: "openlane2" + }, + issue: { + number: process.argv[3] + }, + runId: "api_test" + }; + + let octokit = new Octokit({ + auth: process.argv[2], + }); + + main(octokit, context, "openlane-bot", process.argv[2]); +} \ No newline at end of file diff --git a/.github/test_sets/get_test_matrix.py b/.github/test_sets/get_test_matrix.py index fed252af3..1e591757e 100755 --- a/.github/test_sets/get_test_matrix.py +++ b/.github/test_sets/get_test_matrix.py @@ -65,7 +65,7 @@ def main(scls, use_json, test_sets): ol_dir, "test", "designs", design_name, config_filename ) run_folder = os.path.join( - ol_dir, "test", "designs", design_name, "runs", "CI" + ol_dir, "test", "designs", design_name, "runs", f"{pdk}-{scl}" ) designs.append( { diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d7001bc92..a8a6df15c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -266,7 +266,7 @@ jobs: then nix-shell --run "\ python3 -m openlane\ - --run-tag CI\ + --run-tag ${{ matrix.design.pdk }}-${{ matrix.design.scl }}\ --pdk ${{ matrix.design.pdk }}\ --scl ${{matrix.design.scl}}\ ${{ matrix.design.config }}\ @@ -282,6 +282,67 @@ jobs: 
with: name: ${{ matrix.design.name }}-${{ matrix.design.pdk }}-${{ matrix.design.scl }} path: ${{ matrix.design.run_folder }} + - name: Fetch Metrics + if: ${{ always() }} + run: | + nix-shell --run "\ + python3 -m openlane.state latest\ + ${{ matrix.design.run_folder }}\ + --extract-metrics-to ${{ matrix.design.pdk }}-${{ matrix.design.scl }}-${{ matrix.design.name }}.metrics.json\ + " + - name: Upload Metrics + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: metrics + path: ${{ matrix.design.pdk }}-${{ matrix.design.scl }}-${{ matrix.design.name }}.metrics.json + upload_metrics: + name: Upload Metrics + runs-on: ubuntu-22.04 + needs: [test] + if: ${{ always() }} + steps: + - name: Check out repo + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Setup + run: | + sudo apt-get install -y python3-tk + python3 -m pip install -e . + echo "BRANCH_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV + - name: Download Metrics + uses: actions/download-artifact@v3 + with: + name: metrics + path: current + - name: "[PRs] Compare to main and comment result" + uses: actions/github-script@v6 + if: ${{ github.event_name == 'pull_request' }} + with: + github-token: ${{ secrets.GH_TOKEN }} + script: | + require("./.github/scripts/post_metrics.js")( + github, + context, + "${{ vars.BOT_USERNAME }}", + "${{ secrets.GH_TOKEN }}", + ).then(()=>{console.log("Done.");}); + - name: "[Push] Upload Metrics" + if: ${{ github.event_name == 'push' }} # && env.BRANCH_NAME == 'main' }} + run: | + CURRENT_SHA=$(git rev-parse HEAD) + REPO=efabless/openlane-metrics + BRANCH_NAME="commit-$CURRENT_SHA" + echo "Uploading to 'github.com/$REPO@$BRANCH_NAME'…" + cd current + git init -b $BRANCH_NAME + git add . + git config user.name "${{ vars.BOT_NAME }}" + git config user.email "${{ vars.BOT_EMAIL }}" + git commit -m "Upload" + git remote add origin "https://${{ vars.BOT_USERNAME }}:${{ secrets.GH_TOKEN }}@github.com/$REPO.git" + git push -fu origin $BRANCH_NAME # Force for if we have to re-run the CI for some reason publish: runs-on: ubuntu-22.04 needs: [build-linux-amd64, build-mac-amd64, build-docker, build-py] diff --git a/.gitignore b/.gitignore index c614cbbba..d34ebc413 100644 --- a/.gitignore +++ b/.gitignore @@ -29,7 +29,7 @@ logs/ failures_report.json abc.history slpp_all/ -*/runs +runs/ # Testing .coverage @@ -37,3 +37,9 @@ htmlcov/ prof/ sandbox/ /designs + +# Testing CI +package.json +node_modules +yarn.lock +tables_*.md diff --git a/Makefile b/Makefile index 368513242..03ac105df 100644 --- a/Makefile +++ b/Makefile @@ -37,13 +37,13 @@ lint: venv/manifest.txt .PHONY: test test: venv/manifest.txt - ./venv/bin/coverage run -m pytest + ./venv/bin/coverage run -m pytest -n auto ./venv/bin/coverage report ./venv/bin/coverage html .PHONY: test-all test-all: venv/manifest.txt - ./venv/bin/coverage run -m pytest --step-rx "." + ./venv/bin/coverage run -m pytest --step-rx "." -n auto ./venv/bin/coverage report ./venv/bin/coverage html diff --git a/default.nix b/default.nix index af2738513..2034aba56 100644 --- a/default.nix +++ b/default.nix @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
{ + system, lib, clangStdenv, fetchFromGitHub, @@ -51,7 +52,7 @@ psutil, pytestCheckHook, pyfakefs, - system, + httpx, }: buildPythonPackage rec { name = "openlane"; @@ -115,6 +116,7 @@ buildPythonPackage rec { libparse ioplace-parser psutil + httpx klayout-pymod ] ++ includedTools; diff --git a/docs/source/reference/configuration.md b/docs/source/reference/configuration.md index 6a1d27771..a7a7e2e05 100644 --- a/docs/source/reference/configuration.md +++ b/docs/source/reference/configuration.md @@ -141,8 +141,10 @@ asterisk as a wildcard to pick multiple files in a specific folder. * Outside the design directory, this is disabled for security reasons and the final path will continue to include the asterisk. -* `refg::` will always return an array, even if only one element was found, - for consistency. +* `refg::` will always return an array, even if only one element was found, for + consistency. + * If no elements were found, the glob string is returned verbatim as a single + element in array. As shown below, `refg::$DESIGN_DIR/src/*.v` would find all files ending with `.v` in the `src` folder inside the design directory. diff --git a/docs/source/usage/hardening_macros.md b/docs/source/usage/hardening_macros.md index 043c938bc..a1a1275a6 100644 --- a/docs/source/usage/hardening_macros.md +++ b/docs/source/usage/hardening_macros.md @@ -251,7 +251,7 @@ Routing in general goes through these phases: The subsections include some notes: -#### Antenna Mitigation +### Antenna Mitigation To help mitigate the antenna effect, after Global Placement there are also three other steps you may choose to enable: diff --git a/docs/source/usage/writing_custom_flows.md b/docs/source/usage/writing_custom_flows.md index 877bfa68c..46a20bf39 100644 --- a/docs/source/usage/writing_custom_flows.md +++ b/docs/source/usage/writing_custom_flows.md @@ -215,7 +215,7 @@ its `StepError`s: --- language: python start-after: "step_list.append(step)" -end-before: "self.end_stage()" +end-before: "raise FlowError(str(e))" --- ``` diff --git a/nix/create-shell.nix b/nix/create-shell.nix index 155bed530..e57a11c0f 100644 --- a/nix/create-shell.nix +++ b/nix/create-shell.nix @@ -23,6 +23,7 @@ with pkgs; let openlane pyfakefs pytest + pytest-xdist pillow mdformat ] diff --git a/openlane/common/__init__.py b/openlane/common/__init__.py index 4f1b1d22f..cd3d1ffe4 100644 --- a/openlane/common/__init__.py +++ b/openlane/common/__init__.py @@ -22,13 +22,13 @@ from .tcl import TclUtils from .metrics import parse_metric_modifiers, aggregate_metrics +from . import metrics from .design_format import DesignFormat, DesignFormatObject from .generic_dict import ( GenericDictEncoder, GenericDict, GenericImmutableDict, copy_recursive, - is_string, ) from .misc import ( idem, @@ -39,14 +39,22 @@ protected, final, mkdirp, - Path, zip_first, format_size, format_elapsed_time, + Filter, + get_latest_file, +) +from .types import ( + is_number, + is_string, + Number, + Path, + ScopedFile, ) from .toolbox import Toolbox from .drc import DRC, Violation - +from . import cli ## TPE diff --git a/openlane/common/cli.py b/openlane/common/cli.py index 5cf7ff8da..d962d50bb 100644 --- a/openlane/common/cli.py +++ b/openlane/common/cli.py @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from enum import IntEnum +from cloup import ( + HelpFormatter, + HelpTheme, + Style, +) from typing import Optional, Type, Union from click import ( @@ -19,11 +24,6 @@ Context, Parameter, ) -from cloup import ( - HelpFormatter, - HelpTheme, - Style, -) formatter_settings = HelpFormatter.settings( theme=HelpTheme( diff --git a/openlane/common/generic_dict.py b/openlane/common/generic_dict.py index e7cde64d2..73c767cec 100644 --- a/openlane/common/generic_dict.py +++ b/openlane/common/generic_dict.py @@ -32,6 +32,7 @@ ) from .misc import idem +from .types import is_string class GenericDictEncoder(json.JSONEncoder): @@ -256,10 +257,6 @@ def copy_mut(self) -> GenericDict[KT, VT]: return GenericDict(self) -def is_string(obj: Any) -> bool: - return isinstance(obj, str) or isinstance(obj, UserString) - - # Screw this, if you can figure out how to type hint mapping in dictionary out # and non-mapping in sequence out in Python, be my guest def copy_recursive(input, translator: Callable = idem): @@ -273,8 +270,8 @@ def copy_recursive(input, translator: Callable = idem): By default, :func:`idem` is called. :returns: The copy. - All sequences will become built-in ``list``s and all mappings will - become built-in ``dict``s. + All sequences will become built-in ``list``\\s and all mappings will + become built-in ``dict``\\s. """ def recursive(input, visit_stack: list, *, sequence_cls=list, mapping_cls=dict): diff --git a/openlane/common/metrics.py b/openlane/common/metrics.py deleted file mode 100644 index eba554c08..000000000 --- a/openlane/common/metrics.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2023 Efabless Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import re - -from typing import Mapping, Tuple, Dict, Any, Callable, Iterable - -modifier_rx = re.compile(r"([\w\-]+)\:([\w\-]+)") - - -def parse_metric_modifiers(metric_name: str) -> Tuple[str, Mapping[str, str]]: - """ - Parses a metric name into a base and modifiers as specified in - the `Metrics4ML standard `_. - - :param metric_name: The name of the metric as generated by a utility. - :returns: A tuple of the base part as a string, then the modifiers as - a key-value mapping. - """ - mn_mut = metric_name.split("__") - modifiers = {} - i = len(mn_mut) - 1 - if ":" in mn_mut[i]: - modifier_list = mn_mut[i].split(":") - if len(modifier_list) % 2 == 0: - for i in range(0, len(modifier_list) - 1, 2): - modifiers[modifier_list[i]] = modifier_list[i + 1] - mn_mut.pop() - return "__".join(mn_mut), modifiers - - -def aggregate_metrics( - input: Mapping[str, Any], - aggregator_by_metric: Mapping[str, Tuple[Any, Callable[[Iterable], Any]]], -) -> Dict[str, Any]: - """ - Takes a set of metrics generated according to the `Metrics4ML standard `_. - - :param metric_name: The name of the metric as generated by a utility. - :returns: A tuple of the base part as a string, then the modifiers as - a key-value mapping. 
-    """
-    aggregated: Dict[str, Any] = {}
-    for name, value in input.items():
-        metric_name, modifiers = parse_metric_modifiers(name)
-        if len(modifiers) != 1:
-            # No modifiers = final aggregate, don't double-represent in sums
-            # >1 modifiers = n-level nesting, not supported atm
-            continue
-        entry = aggregator_by_metric.get(metric_name)
-        if entry is None:
-            continue
-        start, aggregator = entry
-        current = aggregated.get(metric_name) or start
-        aggregated[metric_name] = aggregator([current, value])
-
-    final_values = dict(input)
-    final_values.update(aggregated)
-    return final_values
diff --git a/openlane/common/metrics/__init__.py b/openlane/common/metrics/__init__.py
new file mode 100644
index 000000000..812fbc138
--- /dev/null
+++ b/openlane/common/metrics/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2023 Efabless Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Copyright 2023 Efabless Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Metrics Module
+-----------------------
+
+Classes and functions for dealing with Metrics based on the `Metrics4ML `_ standard.
+"""
+from . import library
+from .metric import MetricAggregator, MetricComparisonResult, Metric
+from .util import parse_metric_modifiers, aggregate_metrics, MetricDiff
diff --git a/openlane/common/metrics/__main__.py b/openlane/common/metrics/__main__.py
new file mode 100644
index 000000000..d56056b96
--- /dev/null
+++ b/openlane/common/metrics/__main__.py
@@ -0,0 +1,401 @@
+# Copyright 2023 Efabless Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os +import sys +import json +import gzip +import click +import tarfile +import tempfile +from io import BytesIO +from decimal import Decimal +from typing import Optional, Set, Tuple + +import cloup +import httpx + +from .util import MetricDiff, TableVerbosity +from ..misc import Filter, get_httpx_session, mkdirp +from ..cli import formatter_settings, IntEnumChoice + +default_filter_set = [ + "design__*__area", + "design__max_*", + "design__lvs_error__count", + "antenna__violating*", + "clock__*", + "ir__*", + "power__*", + "timing__*_vio__*", + "*error*", + "!*__iter:*", +] + +# passing_filter_set = [ +# "design__*__area", +# "route__wirelength__max", +# "design__instance__utilization", +# "antenna__violating*", +# "timing__*__ws", +# "clock__skew__*", +# "ir__*", +# "power__*", +# "!*__iter:*", +# ] + + +@cloup.group( + no_args_is_help=True, + formatter_settings=formatter_settings, +) +def cli(): + pass + + +def common_opts(f): + f = cloup.option( + "-f", + "--filter", + "filter_wildcards", + multiple=True, + default=("DEFAULT",), + help="A list of wildcards to filter by. Wildcards prefixed with ! exclude rather than include and take priority. 'DEFAULT' is replaced by a set of default wildcards.", + )(f) + f = cloup.option( + "--table-verbosity", + type=IntEnumChoice(TableVerbosity), + default="ALL", + help=TableVerbosity.__doc__, + )(f) + f = cloup.option( + "--table-out", + type=click.Path(file_okay=True, dir_okay=False, writable=True), + help="The place to write the table to.", + default=None, + )(f) + f = cloup.option( + "--significant-figures", + type=int, + help="Number of significant figures.", + default=4, + )(f) + return f + + +@cloup.command(no_args_is_help=True) +@common_opts +@cloup.argument("metric_files", nargs=2) +def compare( + metric_files: Tuple[str, str], + table_verbosity: TableVerbosity, + filter_wildcards: Tuple[str, ...], + table_out: Optional[str], + significant_figures: int, +): + """ + Creates a small summary of the differences between two ``metrics.json`` files. 
+ """ + if table_verbosity == "NONE": + print("Table is empty.", file=sys.stderr) + exit(0) + + a_path, b_path = metric_files + a = json.load(open(a_path, encoding="utf8"), parse_float=Decimal) + b = json.load(open(b_path, encoding="utf8"), parse_float=Decimal) + + final_filters = [] + for wildcard in filter_wildcards: + if wildcard == "DEFAULT": + final_filters += default_filter_set + else: + final_filters.append(wildcard) + + diff = MetricDiff.from_metrics( + a, b, significant_figures, filter=Filter(final_filters) + ) + + md_str = diff.render_md(sort_by=("corner", ""), table_verbosity=table_verbosity) + + table_file = sys.stdout + if table_out is not None: + table_file = open(table_out, "w", encoding="utf8") + print(md_str, file=table_file) + + # When we upgrade to rich 13 (when NixOS 23.11 comes out, + # it has a proper markdown table renderer, but until then, this will have to do) + + +cli.add_command(compare) + + +def _compare_metric_folders( + filter_wildcards: Tuple[str, ...], + table_verbosity: TableVerbosity, + path_a: str, + path_b: str, + significant_figures: int, +) -> Tuple[str, str]: # (summary, table) + a: Set[Tuple[str, str, str]] = set() + b: Set[Tuple[str, str, str]] = set() + + def add_designs(in_dir: str, to_set: Set[Tuple[str, str, str]]): + for file in os.listdir(in_dir): + basename = os.path.basename(file) + if not basename.endswith(".metrics.json"): + continue + basename = basename[: -len(".metrics.json")] + + parts = basename.split("-", maxsplit=2) + if len(parts) != 3: + raise ValueError( + f"Invalid filename {basename}: not in the format {{pdk}}-{{scl}}-{{design_name}}" + ) + pdk, scl, design = parts + to_set.add((pdk, scl, design)) + + add_designs(path_a, a) + add_designs(path_b, b) + + not_in_a = b - a + not_in_b = a - b + common = a.intersection(b) + difference_report = "" + for tup in not_in_a: + pdk, scl, design = tup + difference_report += f"* Results for a new test, `{'/'.join(tup)}`, detected.\n" + for tup in not_in_b: + pdk, scl, design = tup + difference_report += ( + f"* ‼️ Results for `{'/'.join(tup)}` appear to be missing!\n" + ) + + final_filters = [] + for wildcard in filter_wildcards: + if wildcard == "DEFAULT": + final_filters += default_filter_set + else: + final_filters.append(wildcard) + + filter = Filter(final_filters) + critical_change_report = "" + tables = "" + total_critical = 0 + for pdk, scl, design in sorted(common): + metrics_a = json.load( + open( + os.path.join(path_a, f"{pdk}-{scl}-{design}.metrics.json"), + encoding="utf8", + ), + parse_float=Decimal, + ) + + metrics_b = json.load( + open( + os.path.join(path_b, f"{pdk}-{scl}-{design}.metrics.json"), + encoding="utf8", + ), + parse_float=Decimal, + ) + + diff = MetricDiff.from_metrics( + metrics_a, + metrics_b, + significant_figures, + filter=filter, + ) + + stats = diff.stats() + + total_critical += stats.critical + if stats.critical > 0: + critical_change_report += f" * `{pdk}/{scl}/{design}` \n" + if table_verbosity != "NONE": + rendered = diff.render_md(("corner", ""), table_verbosity) + if rendered.strip() != "": + tables += f"
<details><summary>{pdk}/{scl}/{design}</summary>\n{rendered}\n</details>
\n\n" + + if total_critical == 0: + critical_change_report = ( + "* No critical regressions were detected in analyzed designs.\n" + + critical_change_report + ) + else: + critical_change_report = ( + "* **Critical regressions were detected in the following designs:**\n" + + critical_change_report + ) + + report = "" + report += difference_report + report += critical_change_report + + return report, tables.strip() + + +@cloup.command(no_args_is_help=True) +@common_opts +@cloup.argument("metric_folders", nargs=2) +def compare_multiple( + filter_wildcards: Tuple[str, ...], + table_verbosity: TableVerbosity, + metric_folders: Tuple[str, str], + table_out: Optional[str], + significant_figures: int, +): + """ + Creates a small summary/report of the differences between two folders with + metrics files. + + The metrics files must be named in the format ``{pdk}-{scl}-{design}.metrics.json``. + All other files are ignored. + """ + path_a, path_b = metric_folders + summary, tables = _compare_metric_folders( + filter_wildcards, table_verbosity, path_a, path_b, significant_figures + ) + print(summary) + table_file = sys.stdout + if table_out is not None: + table_file = open(table_out, "w", encoding="utf8") + print(tables, file=table_file) + + +cli.add_command(compare_multiple) + + +@cloup.command(hidden=True) +@cloup.option( + "-r", + "--repo", + default="efabless/openlane2", + help="The GitHub repository for OpenLane", +) +@cloup.option( + "-m", + "--metric-repo", + default="efabless/openlane-metrics", + help="The repository storing metrics for --repo", +) +@cloup.option( + "-c", + "--commit", + default=None, + help="The commit of --repo to fetch the metrics for. By default, that's the latest commit in the main branch.", +) +@cloup.option( + "-t", + "--token", + default=None, + help="A GitHub token to use to query the API and fetch the metrics. Not strictly required, but helps avoid rate-limiting.", +) +@common_opts +@cloup.argument("metric_folder", nargs=1) +def compare_main( + filter_wildcards: Tuple[str, ...], + table_verbosity: TableVerbosity, + repo: str, + metric_repo: str, + commit: Optional[str], + token: str, + metric_folder: str, + table_out: Optional[str], + significant_figures: int, +): + """ + Creates a small summary/report of the differences between a folder and + a set of metrics stored in --metric-repo. Requires Internet access and + access to GitHub. + + The metrics files must be named in the format ``{pdk}-{scl}-{design}.metrics.json``. + All other files are ignored. 
+ """ + session = get_httpx_session(token) + + if commit is None: + try: + result = session.get(f"https://api.github.com/repos/{repo}/branches/main") + except httpx.HTTPStatusError as e: + if e.response is not None and e.response.status_code == 404: + print(f"main branch of repo {repo} not found.", file=sys.stderr) + else: + print( + f"failed to get info from github API: {e.response.status_code}", + file=sys.stderr, + ) + sys.exit(-1) + result.raise_for_status() + commit = str(result.json()["commit"]["sha"]) + url = f"https://github.com/{metric_repo}/tarball/commit-{commit}" + + try: + with tempfile.TemporaryDirectory() as d: + bio_gz = BytesIO() + with session.stream("GET", url) as r: + r.raise_for_status() + for chunk in r.iter_bytes(chunk_size=8192): + bio_gz.write(chunk) + bio_gz.seek(0) + with gzip.GzipFile(fileobj=bio_gz) as bio, tarfile.TarFile( + fileobj=bio, mode="r" + ) as tf: + for file in tf: + if file.isdir(): + continue + stripped = os.path.sep.join(file.name.split(os.path.sep)[1:]) + final_path = os.path.join(d, stripped) + final_dir = os.path.dirname(final_path) + mkdirp(final_dir) + io = tf.extractfile(file) + if io is None: + print( + f"Failed to unpack file in tarball: {file.name}.", + file=sys.stderr, + ) + else: + with open(final_path, "wb") as f: + f.write(io.read()) + + summary, tables = _compare_metric_folders( + filter_wildcards, + table_verbosity, + d, + metric_folder, + significant_figures, + ) + print(summary) + table_file = sys.stdout + if table_out is not None: + table_file = open(table_out, "w", encoding="utf8") + print(tables, file=table_file) + except httpx.HTTPStatusError as e: + if e.response is not None and e.response.status_code == 404: + print(f"Metrics not found for commit: {commit}.", file=sys.stderr) + else: + if e.response is not None: + print( + f"Failed to obtain metrics for {commit} remotely: {e.response}.", + file=sys.stderr, + ) + else: + print( + f"Failed to request metrics for {commit} from server: {e}.", + file=sys.stderr, + ) + sys.exit(-1) + + +cli.add_command(compare_main) + +if __name__ == "__main__": + cli() diff --git a/openlane/common/metrics/library.py b/openlane/common/metrics/library.py new file mode 100644 index 000000000..43af9a5e8 --- /dev/null +++ b/openlane/common/metrics/library.py @@ -0,0 +1,354 @@ +# Copyright 2023 Efabless Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from .metric import Metric, sum_aggregator, min_aggregator, max_aggregator + + +# Area and Counts +Metric( + "design__core__area", + higher_is_better=False, +) +Metric( + "design__die__area", + higher_is_better=False, +) +Metric( + "design__instance__area", + higher_is_better=False, +) +Metric( + "design__instance__count", + higher_is_better=False, +) + +# Power +Metric( + "ir__drop__avg", + higher_is_better=False, +) +Metric( + "ir__drop__worst", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "ir__voltage__worst", + aggregator=min_aggregator, + higher_is_better=True, +) +Metric( + "design_powergrid__drop__average", + higher_is_better=False, +) +Metric( + "design_powergrid__drop__worst", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "design_powergrid__voltage__worst", + aggregator=min_aggregator, + higher_is_better=True, +) +Metric( + "power__internal__total", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "power__leakage__total", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "power__switching__total", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "power__total", + aggregator=sum_aggregator, + higher_is_better=False, +) + +# Timing +Metric( + "timing__hold_vio__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "timing__hold_r2r_vio__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "timing__setup_vio__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "timing__setup_r2r_vio__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "timing__hold__ws", + aggregator=min_aggregator, + higher_is_better=True, +) +Metric( + "timing__hold_r2r__ws", + aggregator=min_aggregator, + higher_is_better=True, +) +Metric( + "timing__setup__ws", + aggregator=min_aggregator, + higher_is_better=True, +) + +Metric( + "timing__setup_r2r__ws", + aggregator=min_aggregator, + higher_is_better=True, +) +Metric( + "timing__hold__wns", + aggregator=min_aggregator, + higher_is_better=True, + critical=True, +) +Metric( + "timing__setup__wns", + aggregator=min_aggregator, + higher_is_better=True, + critical=True, +) +Metric( + "timing__hold__tns", + aggregator=min_aggregator, + higher_is_better=True, + critical=True, +) +Metric( + "timing__unannotated_net__count", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "timing__unannotated_net_filtered__count", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "timing__setup__tns", + aggregator=min_aggregator, + higher_is_better=True, + critical=True, +) +Metric( + "clock__skew__worst_hold", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "clock__skew__worst_setup", + aggregator=min_aggregator, + higher_is_better=True, +) + +# Constraint Violation +Metric( + "design__max_slew_violation__count", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "design__max_fanout_violation__count", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "design__max_cap_violation__count", + aggregator=max_aggregator, + higher_is_better=False, +) + +# Placement and Routing +Metric( + "route__wirelength", + aggregator=sum_aggregator, + higher_is_better=False, + dont_aggregate=["iter"], +) +Metric( + "route__wirelength__estimated", + aggregator=sum_aggregator, + higher_is_better=False, + dont_aggregate=["iter"], +) +Metric( + 
"route__wirelength__max", + aggregator=max_aggregator, + higher_is_better=False, + dont_aggregate=["iter"], +) +Metric( + "route__antenna_violation__count", + aggregator=sum_aggregator, + higher_is_better=False, + dont_aggregate=["iter"], +) +Metric( + "design__instance__displacement__max", + aggregator=max_aggregator, + higher_is_better=False, +) +Metric( + "design__instance__displacement__total", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "design__instance__utilization", + higher_is_better=True, +) + +# Potential Issues +Metric( + "design__lint_warning__count", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "design__lint_error__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lint_timing_construct__count", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "antenna__violating__nets", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "antenna__violating__pins", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "design__instance_unmapped__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__disconnected_pin__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__inferred_latch__count", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "design__violations", + aggregator=sum_aggregator, + higher_is_better=False, +) +Metric( + "design__xor_difference__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "route__drc_errors", + aggregator=sum_aggregator, + higher_is_better=False, + dont_aggregate=["iter"], + critical=True, +) +Metric( + "magic__drc_error__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "magic__illegal_overlap__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "synthesis__check_error__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_error__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_device_difference__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_net_difference__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_property_fail__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_unmatched_device__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_unmatched_net__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) +Metric( + "design__lvs_unmatched_pin__count", + aggregator=sum_aggregator, + higher_is_better=False, + critical=True, +) diff --git a/openlane/common/metrics/metric.py b/openlane/common/metrics/metric.py new file mode 100644 index 000000000..9824bbc0a --- /dev/null +++ b/openlane/common/metrics/metric.py @@ -0,0 +1,187 @@ +# Copyright 2023 Efabless Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from math import inf +from decimal import Decimal +from dataclasses import dataclass +from typing import Any, Callable, Iterable, Mapping, Optional, Tuple, ClassVar, Dict + + +from ..types import Number, is_number, is_real_number + +MetricAggregator = Tuple[Number, Callable[[Iterable[Number]], Number]] + +sum_aggregator: MetricAggregator = (0, lambda x: sum(x)) +min_aggregator: MetricAggregator = (inf, min) +max_aggregator: MetricAggregator = (-inf, max) + + +@dataclass +class MetricComparisonResult: + """ + :param metric_name: The name of the metric that has been compared + :param gold: The "gold" value being compared against + :param new: The new value being evaluated + :param delta: ``None`` if and only if ``before`` - ``after`` is an invalid number. + Evaluates to ``after - before``\\. + :param delta_pct: ``None`` if ``delta`` is None or before is zero. + Otherwise, evaluates to ``delta / before * 100``\\. + :param better: Whether the change in the value is considered a good thing or + not. ``None`` if ``delta`` is None or has no value set for + ``Metric.higher_is_better``\\. + :param critical: Whether this change of value very likely results in a dead + chip, i.e., an increase in DRC values, or an inexplicable change in + the number of I/O pins. + + """ + + metric_name: str + gold: Any + new: Any + delta: Optional[Number] + delta_pct: Optional[Number] + better: Optional[bool] + critical: bool + significant_figures: Optional[int] + + def is_changed(self) -> bool: + return (self.delta is not None and self.delta != 0) or self.gold != self.new + + def format_values(self) -> Tuple[str, str, str]: + before_str = str(self.gold) + if isinstance(self.gold, float) or isinstance(self.gold, Decimal): + before_str = str(f"{self.gold:.{self.significant_figures}f}") + + after_str = str(self.new) + if isinstance(self.new, float) or isinstance(self.new, Decimal): + after_str = str(f"{self.new:.{self.significant_figures}f}") + + delta_str = "N/A" + if self.delta is not None: + delta_str = str(round(self.delta, 6)) + if isinstance(self.delta, float) or isinstance(self.delta, Decimal): + delta_str = str(f"{self.delta:.{self.significant_figures}f}") + if self.delta_pct is not None: + delta_pct_str = str(f"{self.delta_pct:.{self.significant_figures}f}") + if self.delta_pct > 0: + delta_pct_str = f"+{delta_pct_str}" + delta_str = f"{delta_str} ({delta_pct_str}%)" + + return before_str, after_str, delta_str + + +@dataclass +class Metric(object): + """ + An object storing data about a metric as defined in + . + + :param name: The string name of the metric. + :param aggregator: A tuple of: + - A starting value for an accumulator + - A reduction function + + The aim is the ability to aggregate values from various sub-metrics, + i.e., for the metric ``timing__hold_vio__count``, the sub-metrics: + + - ``timing__hold_vio__count__corner:A`` + - ``timing__hold_vio__count__corner:B`` + + Would be summed up to generate the value for ``timing__hold_vio__count``. 
+ :param higher_is_better: At a high level, whether a higher numeric value for + this metric is considered "good" (such as: better utilization) or "bad" + (such as: more antenna violations.) + :param critical: A critical metric is always watched for any change. + + """ + + name: str + aggregator: Optional[MetricAggregator] = None + higher_is_better: Optional[bool] = None + dont_aggregate: Optional[Iterable[str]] = None + critical: bool = False + + by_name: ClassVar[Dict[str, "Metric"]] = {} + + def __post_init__(self): + Metric.by_name[self.name] = self + + def modified_name(self, modifiers: Mapping[str, str]) -> str: + """ + :param modifiers: Modifiers of a metric (i.e. the elements postfixed to the metric in the format {key}:{value}) + :returns: The name with the modifiers added + """ + return "__".join([self.name] + [f"{k}:{v}" for k, v in modifiers.items()]) + + def compare( + self, + gold: Any, + new: Any, + significant_figures: int, + modifiers: Optional[Mapping[str, str]] = None, + ) -> MetricComparisonResult: + """ + :param gold: The "gold-standard" value for this metric to compare against + :param new: The new value for this metric being evaluated + :param modifier: The modifiers that were parsed from the metric name + (if applicable)- used to set the ``metric_name`` property of + :class:`MetricComparisonResult`. + :returns: The result of comparing two values for this metric. + """ + is_better = None + is_critical = self.critical and (gold != new) + delta = None + delta_pct = None + + if modifiers is None: + modifiers = {} + + if is_real_number(gold) and is_real_number(new): + if isinstance(gold, float) or isinstance(new, float): + gold = Decimal(gold) + new = Decimal(new) + delta = new - gold + + if gold == 0: + if new == 0: + delta_pct = Decimal(0) + else: + delta_pct = Decimal((delta / gold) * 100) + if delta_pct == 0: + delta_pct = Decimal(0) # Fix negative zero + + if self.higher_is_better is not None: + if self.higher_is_better: + is_better = delta >= 0 + else: + is_better = delta <= 0 + elif self.higher_is_better is not None: + if not is_number(gold): + raise TypeError( + f"'{gold}' is not a number for metric {self.name} with non-None 'higher_is_better' field" + ) + if not is_number(new): + raise TypeError( + f"'{new}' is not a number for metric {self.name} with non-None 'higher_is_better' field" + ) + + return MetricComparisonResult( + self.modified_name(modifiers), + gold, + new, + delta, + delta_pct, + is_better, + is_critical, + significant_figures, + ) diff --git a/openlane/common/metrics/util.py b/openlane/common/metrics/util.py new file mode 100644 index 000000000..02e01e80b --- /dev/null +++ b/openlane/common/metrics/util.py @@ -0,0 +1,275 @@ +# Copyright 2023 Efabless Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re +import textwrap +from enum import IntEnum +from dataclasses import dataclass +from typing import ( + List, + Mapping, + Tuple, + Dict, + Any, + Iterable, + Optional, + Union, +) + +from .metric import Metric, MetricAggregator, MetricComparisonResult +from ..misc import Filter + +modifier_rx = re.compile(r"([\w\-]+)\:([\w\-]+)") + + +class TableVerbosity(IntEnum): + """ + The verbosity of the table: whether to include everything, just changes, only + bad changes or only critical change. Or just nothing. + """ + + NONE = 0 + CRITICAL = 1 + WORSE = 2 + CHANGED = 3 + ALL = 4 + + +def parse_metric_modifiers(metric_name: str) -> Tuple[str, Mapping[str, str]]: + """ + Parses a metric name into a base and modifiers as specified in + the `Metrics4ML standard `_. + + :param metric_name: The name of the metric as generated by a utility. + :returns: A tuple of the base part as a string, then the modifiers as + a key-value mapping. + """ + mn_mut = metric_name.split("__") + modifiers = {} + while ":" in mn_mut[-1]: + key, value = mn_mut.pop().split(":", maxsplit=1) + modifiers[key] = value + return "__".join(mn_mut), modifiers + + +def aggregate_metrics( + input: Mapping[str, Any], + aggregator_by_metric: Optional[ + Mapping[str, Union[MetricAggregator, Metric]] + ] = None, +) -> Dict[str, Any]: + """ + Takes a set of metrics generated according to the + `Metrics4ML standard `_. + + :param input: A mapping of strings to values of metrics. + :param aggregator_by_metric: A mapping of metric names to either: + - A tuple of the initial accumulator and reducer to aggregate the values from all modifier metrics + - A :class:`Metric` class + :returns: A tuple of the base part as a string, then the modifiers as + a key-value mapping. + """ + if aggregator_by_metric is None: + aggregator_by_metric = Metric.by_name + + aggregated: Dict[str, Any] = {} + for name, value in input.items(): + metric_name, modifiers = parse_metric_modifiers(name) + if len(modifiers) != 1: + # No modifiers = final aggregate, don't double-represent in sums + # >1 modifiers = n-level nesting, not supported atm + continue + + modifier = list(modifiers.keys())[0] + + dont_aggregate: Iterable[str] = [] + entry = aggregator_by_metric.get(metric_name) + if isinstance(entry, Metric): + dont_aggregate = entry.dont_aggregate or [] + entry = entry.aggregator + if entry is None: + continue + if modifier in dont_aggregate: + continue + start, aggregator = entry + current = aggregated.get(metric_name) or start + aggregated[metric_name] = aggregator([current, value]) + + final_values = dict(input) + final_values.update(aggregated) + return final_values + + +def _key_from_metrics(fields: Iterable[str], metric: str) -> List[str]: + base, modifiers = parse_metric_modifiers(metric) + result = [] + for field in fields: + if field == "": + result.append(base) + else: + result.append(modifiers.get(field, "")) + return result + + +class MetricDiff(object): + """ + Aggregates a number of ``MetricComparisonResult`` and allows a number of + functions to be performed on them. + + :param differences: The metric comparison results. + """ + + @dataclass + class MetricStatistics: + """ + A glorified namespace encapsulating a number of statistics of + :class:`MetricDiff`. + + Should be generated using :meth:`MetricDiff.stats`. + + :param better: The number of datapoints that represent a positive change. + :param worse: The number of datapoints that represent a negative change. + :param critical: The number of changes for critical metrics. 
+ :param unchanged: Values that are unchanged. + """ + + better: int = 0 + worse: int = 0 + critical: int = 0 + unchanged: int = 0 + + differences: List[MetricComparisonResult] + + def __init__(self, differences: Iterable[MetricComparisonResult]) -> None: + self.differences = list(differences) + + def render_md( + self, + sort_by: Optional[Iterable[str]] = None, + table_verbosity: TableVerbosity = TableVerbosity.ALL, + ) -> str: + """ + :param sort_by: A list of tuples corresponding to modifiers to sort + metrics ascendingly by. + :param table_verbosity: The verbosity of the table: whether to include everything, just changes, only bad changes or only critical changes. Or just nothing. + :returns: A table of the differences in Markdown format. + """ + if table_verbosity == TableVerbosity.NONE: + return "" + + differences = self.differences + if fields := sort_by: + differences = sorted( + differences, + key=lambda x: _key_from_metrics(fields, x.metric_name), + ) + + table = "" + + changed = [] + worse = [] + critical = [] + remaining = [] + + for row in differences: + if row.critical is True: + critical.append(row) + elif row.better is False: + worse.append(row) + elif row.is_changed(): + changed.append(row) + else: + remaining.append(row) + + listed_differences: List[MetricComparisonResult] = [] + if table_verbosity >= TableVerbosity.CRITICAL: + listed_differences += critical + if table_verbosity >= TableVerbosity.WORSE: + listed_differences += worse + if table_verbosity >= TableVerbosity.CHANGED: + listed_differences += changed + if table_verbosity >= TableVerbosity.ALL: + listed_differences += remaining + + if len(listed_differences) > 0: + table = textwrap.dedent( + f""" + | {'Metric':<70} | {'Before':<10} | {'After':<10} | {'Delta':<20} | + | {'-':<70} | {'-':<10} | {'-':<10} | {'-':<20} | + """ + ) + + for row in listed_differences: + before, after, delta = row.format_values() + emoji = "" + if row.better is not None: + if row.better: + emoji = " ⭕" + else: + emoji = " ❗" + if row.critical and row.is_changed(): + emoji = " ‼️" + table += f"| {row.metric_name:<70} | {before:<10} | {after:<10} | {f'{delta}{emoji}':<20} |\n" + + return table + + def stats(self) -> MetricStatistics: + """ + :returns: A :class:`MetricStatistics` object based on this aggregate. + """ + stats = MetricDiff.MetricStatistics() + for row in self.differences: + if not row.is_changed(): + stats.unchanged += 1 + elif row.better is not None: + if row.better: + stats.better += 1 + else: + stats.worse += 1 + if row.critical: + stats.critical += 1 + return stats + + @classmethod + def from_metrics( + Self, + gold: dict, + new: dict, + significant_figures: int, + filter: Filter = Filter(["*"]), + ) -> "MetricDiff": + """ + Creates a :class:`MetricDiff` object from two sets of metrics. + + :param gold: The "gold-standard" metrics to compare against + :param new: The metrics being evaluated + :param filter: A :class:`Filter` for the names of the metrics to include + or exclude certain metrics. 
+ :returns: The aggregate of the differences between gold and good + """ + + def generator(g, n): + for metric in filter.filter(sorted(n.keys())): + if metric not in g: + continue + base_metric, modifiers = parse_metric_modifiers(metric) + lhs_value, rhs_value = g[metric], n[metric] + if type(lhs_value) != type(rhs_value): + lhs_value = type(rhs_value)(lhs_value) + + if metric_object := Metric.by_name.get(base_metric): + yield metric_object.compare( + lhs_value, rhs_value, significant_figures, modifiers=modifiers + ) + + return MetricDiff(generator(gold, new)) diff --git a/openlane/common/misc.py b/openlane/common/misc.py index b08c3e333..a72f12bc5 100644 --- a/openlane/common/misc.py +++ b/openlane/common/misc.py @@ -13,21 +13,25 @@ # limitations under the License. import os import re -import sys +import glob import typing +import fnmatch import pathlib import unicodedata -from collections import UserString +from math import inf from typing import ( Any, - ClassVar, + Generator, Iterable, TypeVar, Optional, + SupportsFloat, Union, - Tuple, ) +import httpx +from .types import Path +from ..__version__ import __version__ T = TypeVar("T") @@ -148,61 +152,6 @@ def mkdirp(path: typing.Union[str, os.PathLike]): return pathlib.Path(path).mkdir(parents=True, exist_ok=True) -class Path(UserString, os.PathLike): - """ - A Path type for OpenLane configuration variables. - - Basically just a string. - """ - - # This path will pass the validate() call, but will - # fail to open. It should be used for deprecated variable - # translation only. - _dummy_path: ClassVar[str] = "__openlane_dummy_path" - - def __fspath__(self) -> str: - return str(self) - - def __repr__(self) -> str: - return f"{self.__class__.__qualname__}('{self}')" - - def exists(self) -> bool: - """ - A convenience method calling :meth:`os.path.exists` - """ - return os.path.exists(self) - - def validate(self): - """ - Raises an error if the path does not exist. - """ - if not self.exists() and not self == Path._dummy_path: - raise ValueError(f"'{self}' does not exist") - - def startswith( - self, - prefix: Union[str, Tuple[str, ...], UserString, os.PathLike], - start: Optional[int] = 0, - end: Optional[int] = sys.maxsize, - ) -> bool: - if isinstance(prefix, UserString) or isinstance(prefix, os.PathLike): - prefix = str(prefix) - return super().startswith(prefix, start, end) - - def rel_if_child( - self, - start: Union[str, os.PathLike] = os.getcwd(), - *, - relative_prefix: str = "", - ) -> "Path": - my_abspath = os.path.abspath(self) - start_abspath = os.path.abspath(start) - if my_abspath.startswith(start_abspath): - return Path(relative_prefix + os.path.relpath(self, start_abspath)) - else: - return Path(my_abspath) - - class zip_first(object): """ Works like ``zip_longest`` if |a| > |b| and ``zip`` if |a| <= |b|. 
@@ -249,7 +198,13 @@ def format_size(byte_count: int) -> str: return f"{so_far}{units[tracker]}" -def format_elapsed_time(elapsed_seconds: float) -> str: +def format_elapsed_time(elapsed_seconds: SupportsFloat) -> str: + """ + :param elapsed_seconds: Total time elapsed in seconds + :returns: A string in the format ``{hours}:{minutes}:{seconds}:{milliseconds}`` + """ + elapsed_seconds = float(elapsed_seconds) + hours = int(elapsed_seconds // 3600) leftover = elapsed_seconds % 3600 @@ -260,3 +215,95 @@ def format_elapsed_time(elapsed_seconds: float) -> str: milliseconds = int((leftover % 1) * 1000) return f"{hours:02}:{minutes:02}:{seconds:02}.{milliseconds:03}" + + +class Filter(object): + """ + Encapsulates commonly used wildcard-based filtering functions into an object. + + :param filters: A list of a wildcards supporting the + `fnmatch spec `_. + + The wildcards will be split into an "allow" and "deny" list based on whether + the filter is prefixed with a ``!``. + """ + + def __init__(self, filters: Iterable[str]): + self.allow = [] + self.deny = [] + for filter in filters: + if filter.startswith("!"): + self.deny.append(filter[1:]) + else: + self.allow.append(filter) + + def get_matching_wildcards(self, input: str) -> Generator[str, Any, None]: + """ + :param input: An input to match wildcards against. + :returns: An iterable object for *all* wildcards in the allow list accepting + ``input``, and *all* wildcards in the deny list rejecting ``input``. + """ + for wildcard in self.allow: + if fnmatch.fnmatch(input, wildcard): + yield wildcard + for wildcard in self.deny: + if not fnmatch.fnmatch(input, wildcard): + yield wildcard + + def filter( + self, + inputs: Iterable[str], + ) -> Generator[str, Any, None]: + """ + :param inputs: A series of inputs to filter according to the wildcards. + :returns: An iterable object of any values in ``inputs`` that: + * Have matched at least one wildcard in the allow list + * Have matched exactly 0 inputs in the deny list + """ + for input in inputs: + allowed = False + for wildcard in self.allow: + if fnmatch.fnmatch(input, wildcard): + allowed = True + break + for wildcard in self.deny: + if fnmatch.fnmatch(input, wildcard): + allowed = False + break + if allowed: + yield input + + +def get_latest_file(in_path: Union[str, os.PathLike], filename: str) -> Optional[Path]: + """ + :param in_path: A directory to search in + :param filename: The final filename + :returns: The latest file matching the parameters, by modification time + """ + glob_results = glob.glob(os.path.join(in_path, "**", filename), recursive=True) + latest_time = -inf + latest_json = None + for result in glob_results: + time = os.path.getmtime(result) + if time > latest_time: + latest_time = time + latest_json = Path(result) + + return latest_json + + +def get_httpx_session(token: Optional[str] = None) -> httpx.Client: + """ + Creates an ``httpx`` session client that follows redirects and has the + User-Agent header set to ``openlane2/{__version__}``. + + :param token: If this parameter is non-None and not empty, another header, + Authorization: Bearer {token}, is included. 
+ :returns: The created client + """ + session = httpx.Client(follow_redirects=True) + headers_raw = {"User-Agent": f"openlane2/{__version__}"} + if token is not None and token.strip() != "": + headers_raw["Authorization"] = f"Bearer {token}" + session.headers = httpx.Headers(headers_raw) + return session diff --git a/openlane/common/toolbox.py b/openlane/common/toolbox.py index 8e5c80c8d..57efb40e7 100644 --- a/openlane/common/toolbox.py +++ b/openlane/common/toolbox.py @@ -15,7 +15,6 @@ import re import uuid import shutil -import fnmatch import tempfile import subprocess from enum import IntEnum @@ -38,10 +37,12 @@ from deprecated.sphinx import deprecated -from .misc import Path, mkdirp +from .misc import mkdirp +from .types import Path from .metrics import aggregate_metrics from .design_format import DesignFormat from .generic_dict import GenericImmutableDict, is_string +from ..common import Filter from ..logging import debug, warn, err, verbose @@ -94,9 +95,8 @@ def filter_views( timing_corner = timing_corner or config["DEFAULT_CORNER"] result: List[Path] = [] - for key, value in views_by_corner.items(): - if not fnmatch.fnmatch(timing_corner, key): - continue + for key in Filter(views_by_corner).get_matching_wildcards(timing_corner): + value = views_by_corner[key] if is_string(value): result += [value] # type: ignore else: @@ -183,19 +183,18 @@ def get_timing_files( :param timing_corner: A fully qualified IPVT corner to get SCL libs for. - If not specified, the value for `DEFAULT_CORNER` from the SCL will + If not specified, the value for ``DEFAULT_CORNER`` from the SCL will be used. :param prioritize_nl: Do not return lib files for macros that have gate-Level Netlists and SPEF views. - If set to ``false``, only lib files are returned. + If set to ``false``\\, only lib files are returned. :returns: A tuple of: * The name of the timing corner - * A heterogenous list of files - - Lib files are returned as-is - - Netlists are returned as-is - - SPEF files are returned in the format "{instance_name}@{spef_path}" + * A heterogenous list of files composed of: Lib files are returned as-is, + Netlists are returned as-is, and SPEF files are returned in the + format ``{instance_name}@{spef_path}``\\. It is left up to the step or tool to process this list as they see fit. @@ -221,6 +220,9 @@ def get_timing_files( ) if prioritize_nl: netlists = macro.nl + if isinstance(netlists, Path): + netlists = [netlists] + spefs = self.filter_views( config, macro.spef, diff --git a/openlane/common/types.py b/openlane/common/types.py new file mode 100644 index 000000000..f02b969a9 --- /dev/null +++ b/openlane/common/types.py @@ -0,0 +1,114 @@ +# Copyright 2023 Efabless Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +import sys +import tempfile +from math import isfinite +from decimal import Decimal +from collections import UserString +from typing import Any, Union, ClassVar, Tuple, Optional + + +def is_string(obj: Any) -> bool: + return isinstance(obj, str) or isinstance(obj, UserString) + + +Number = Union[int, float, Decimal] + + +def is_number(obj: Any) -> bool: + return isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, Decimal) + + +def is_real_number(obj: Any) -> bool: + return is_number(obj) and isfinite(obj) + + +class Path(UserString, os.PathLike): + """ + A Path type for OpenLane configuration variables. + + Basically just a string. + """ + + # This path will pass the validate() call, but will + # fail to open. It should be used for deprecated variable + # translation only. + _dummy_path: ClassVar[str] = "__openlane_dummy_path" + + def __fspath__(self) -> str: + return str(self) + + def __repr__(self) -> str: + return f"{self.__class__.__qualname__}('{self}')" + + def exists(self) -> bool: + """ + A convenience method calling :meth:`os.path.exists` + """ + return os.path.exists(self) + + def validate(self, message_on_err: str = ""): + """ + Raises an error if the path does not exist. + """ + if not self.exists() and not self == Path._dummy_path: + raise ValueError(f"{message_on_err}: '{self}' does not exist") + + def startswith( + self, + prefix: Union[str, Tuple[str, ...], UserString, os.PathLike], + start: Optional[int] = 0, + end: Optional[int] = sys.maxsize, + ) -> bool: + if isinstance(prefix, UserString) or isinstance(prefix, os.PathLike): + prefix = str(prefix) + return super().startswith(prefix, start, end) + + def rel_if_child( + self, + start: Union[str, os.PathLike] = os.getcwd(), + *, + relative_prefix: str = "", + ) -> "Path": + my_abspath = os.path.abspath(self) + start_abspath = os.path.abspath(start) + if my_abspath.startswith(start_abspath): + return Path(relative_prefix + os.path.relpath(self, start_abspath)) + else: + return Path(my_abspath) + + +class ScopedFile(Path): + """ + Creates a temporary file that remains valid while this variable is in scope, + and is deleted upon deconstruction. + + The object itself is a string pointing to that file path. + + :param contents: The contents of the temporary file to create. + """ + + def __init__(self, *, contents="") -> None: + self._ntf = tempfile.NamedTemporaryFile( + "w", + delete=False, + encoding="utf8", + ) + super().__init__(self._ntf.name) + self._ntf.write(contents) + self._ntf.close() + + def __del__(self): + os.unlink(self._ntf.name) diff --git a/openlane/config/config.py b/openlane/config/config.py index 2b878ac6f..cce991d1c 100644 --- a/openlane/config/config.py +++ b/openlane/config/config.py @@ -80,7 +80,8 @@ def __init__( if message is None: message = "The following errors were encountered: \n" for error in self.errors: - message += f"\t* {error}" + message += f"\t* {error}\n" + message = message.strip() super().__init__(message, *args, **kwargs) diff --git a/openlane/config/flow.py b/openlane/config/flow.py index 5814873d5..546d7d983 100644 --- a/openlane/config/flow.py +++ b/openlane/config/flow.py @@ -32,7 +32,7 @@ def _prefix_to_wildcard(prefixes_raw: Union[str, Sequence[str]]): Variable( "STD_CELL_LIBRARY", str, - "Specifies the default standard cell library to be used under the specified PDK.", + "Specifies the default standard cell library to be used under the specified PDK. 
Must be a valid C identifier, i.e., matches the regular expression `[_a-zA-Z][_a-zA-Z0-9]+`.", pdk=True, ), Variable( @@ -441,12 +441,12 @@ def _prefix_to_wildcard(prefixes_raw: Union[str, Sequence[str]]): Variable( "DESIGN_NAME", str, - "The name of the top level module of the design. This is the only variable that MUST be set in every single OpenLane configuration file or dictionary.", + "The name of the top level module of the design. Must be a valid C identifier, i.e., matches the regular expression `[_a-zA-Z][_a-zA-Z0-9]+`.", ), Variable( "PDK", str, - "Specifies the process design kit (PDK).", + "Specifies the process design kit (PDK). Must be a valid C identifier, i.e., matches the regular expression `[_a-zA-Z][_a-zA-Z0-9]+`.", default="sky130A", ), Variable( diff --git a/openlane/config/preprocessor.py b/openlane/config/preprocessor.py index a7c13989f..04f7069c8 100644 --- a/openlane/config/preprocessor.py +++ b/openlane/config/preprocessor.py @@ -288,6 +288,10 @@ def process_string( files = sorted(glob.glob(final_abspath)) files_escaped = [file.replace("$", r"\$") for file in files] files_escaped.sort() + + if len(files_escaped) == 0: + files_escaped = [concatenated] + return files_escaped else: return mutable diff --git a/openlane/config/variable.py b/openlane/config/variable.py index aa4e683c9..42f3dd0e3 100644 --- a/openlane/config/variable.py +++ b/openlane/config/variable.py @@ -15,7 +15,15 @@ import inspect from enum import Enum from decimal import Decimal, InvalidOperation -from dataclasses import _MISSING_TYPE, MISSING, dataclass, field, fields, is_dataclass +from dataclasses import ( + _MISSING_TYPE, + MISSING, + asdict, + dataclass, + field, + fields, + is_dataclass, +) from typing import ( ClassVar, Dict, @@ -32,13 +40,36 @@ get_origin, get_args, ) -from ..state import DesignFormat -from ..common import GenericDict, Path, is_string, zip_first +from ..state import DesignFormat, State +from ..common import GenericDict, Path, is_string, zip_first, Number # Scalar = Union[Type[str], Type[Decimal], Type[Path], Type[bool]] # VType = Union[Scalar, List[Scalar]] +class Orientation(str, Enum): + N = "N" + FN = "FN" + W = "W" + FW = "FW" + S = "S" + FS = "FS" + E = "E" + FE = "FE" + # OpenAccess + R0 = "N" + MY = "FN" + R90 = "W" + MXR90 = "FW" + R180 = "S" + MX = "FS" + R270 = "E" + MYR90 = "FE" + + def __str__(self) -> str: + return self.value + + @dataclass class Instance: """ @@ -49,7 +80,7 @@ class Instance: """ location: Tuple[Decimal, Decimal] - orientation: Literal["N", "S", "FN", "FS", "E", "W", "FW", "FE"] + orientation: Orientation @dataclass @@ -127,6 +158,48 @@ def __post_init__(self): "Macro definition invalid- at least one LEF file must be specified." 
) + def __repr__(self) -> str: + return f"{self.__class__.__qualname__}(%s)" % ", ".join( + [f"{k}={repr(v)}" for k, v in asdict(self).items()] + ) + + def __str__(self) -> str: + return self.__repr__() + + @classmethod + def from_state(Self, state: State) -> "Macro": + kwargs = {} + for macro_field in fields(Self): + views = state.get(macro_field.name) + if views is None: + if macro_field.default_factory is not MISSING: + kwargs[macro_field.name] = macro_field.default_factory() + elif macro_field.default is not MISSING: + kwargs[macro_field.name] = macro_field.default + else: # gds or lef + raise ValueError( + f"Macro cannot be made out of input state: View {macro_field.name} is missing" + ) + continue + var_name = f"{Self.__name__}.{macro_field.name}" + _, final = Variable(var_name, macro_field.type, "").compile( + GenericDict({var_name: views}), + warning_list_ref=[], + permissive_typing=True, + ) + kwargs[macro_field.name] = final + + return Self(**kwargs) # type: ignore + + def instantiate( + self, + instance_name: str, + location: Tuple[Number, Number], + orientation: Orientation = Orientation.N, + ): + location = (Decimal(location[0]), Decimal(location[1])) + self.instances[instance_name] = Instance(location, Orientation[orientation]) + def is_optional(t: Type[Any]) -> bool: type_args = get_args(t) @@ -340,7 +413,7 @@ def __process( raw = value if isinstance(raw, list) or isinstance(raw, tuple): pass - elif isinstance(raw, str): + elif is_string(raw): if not permissive_typing: raise ValueError( f"Refusing to automatically convert string at '{key_path}' to list" ) @@ -502,12 +575,7 @@ def __process( if isinstance(value, list) and len(value) == 1: value = value[0] result = Path(value) - try: - result.validate() - except ValueError as e: - raise ValueError( - f"Path provided for variable '{key_path}' is invalid: '{e}'" - ) + result.validate(f"Path provided for variable '{key_path}' is invalid") return result elif validating_type == bool: if not permissive_typing and not isinstance(value, bool): diff --git a/openlane/container.py b/openlane/container.py index 2f71e45a2..87e47cb85 100644 --- a/openlane/container.py +++ b/openlane/container.py @@ -15,11 +15,11 @@ ## This file is internal to OpenLane 2 and is not part of the API. import os import re +import httpx import shlex import pathlib import getpass import tempfile -import requests import subprocess from typing import List, Sequence, Optional, Union, Tuple @@ -107,14 +107,15 @@ def remote_manifest_exists(image: str) -> bool: return False try: - request = requests.get(url, headers={"Accept": "application/json"}) - request.raise_for_status() - except requests.exceptions.ConnectionError: + httpx.Client(follow_redirects=True).get( + url, headers={"Accept": "application/json"} + ).raise_for_status() + except httpx.NetworkError: err("Couldn't connect to the internet to pull container images.") return False - except requests.exceptions.HTTPError: + except httpx.HTTPStatusError as e: err( - f"The image {image} was not found. This may be because the CI for this image is running- in which case, please try again later." + f"The image {image} was not found. This may be because the CI for this image is running, in which case please try again later.
(error: {e})" ) return False return True diff --git a/openlane/flows/flow.py b/openlane/flows/flow.py index 0c53b909c..61fcc4941 100644 --- a/openlane/flows/flow.py +++ b/openlane/flows/flow.py @@ -60,7 +60,15 @@ deregister_additional_handler, options, ) -from ..common import get_tpe, mkdirp, protected, final, slugify, Toolbox +from ..common import ( + get_tpe, + mkdirp, + protected, + final, + slugify, + Toolbox, + get_latest_file, +) class FlowError(RuntimeError): @@ -251,7 +259,7 @@ class Flow(ABC): :cvar config_vars: A list of **flow-specific** configuration variables. These configuration variables are used entirely within the logic of the flow itself and - are not exposed to ``Step``s. + are not exposed to ``Step``\\s. :ivar step_objects: A list of :class:`Step` **objects** from the last run of the flow, @@ -483,7 +491,9 @@ def start( raise FlowException("last_run used without any existing runs") # Stored until next start() - self.run_dir = _force_run_dir or os.path.join(self.design_dir, "runs", tag) + self.run_dir = os.path.abspath( + _force_run_dir or os.path.join(self.design_dir, "runs", tag) + ) initial_state = with_initial_state or State() starting_ordinal = 1 @@ -495,7 +505,7 @@ def start( # Extract maximum step ordinal for entry in entries: - components = entry.split("-") + components = entry.split("-", maxsplit=1) if len(components) < 2: continue try: @@ -506,18 +516,7 @@ def start( # Extract Maximum State if with_initial_state is None: - latest_time = 0 - latest_json: Optional[str] = None - state_out_jsons = sorted( - glob.glob(os.path.join(self.run_dir, "*", "state_out.json")) - ) - for state_out_json in state_out_jsons: - time = os.path.getmtime(state_out_json) - if time > latest_time: - latest_time = time - latest_json = state_out_json - - if latest_json is not None: + if latest_json := get_latest_file(self.run_dir, "state_out.json"): verbose(f"Using state at '{latest_json}'.") initial_state = State.loads( diff --git a/openlane/flows/sequential.py b/openlane/flows/sequential.py index c99403d3c..109dc0b1f 100644 --- a/openlane/flows/sequential.py +++ b/openlane/flows/sequential.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations -import fnmatch import os from typing import Iterable, List, Set, Tuple, Optional, Type, Dict, Union from .flow import Flow, FlowException, FlowError +from ..common import Filter from ..state import State from ..logging import info, success, err, debug from ..steps import ( @@ -72,16 +72,11 @@ def __init_subclass__(Self, scm_type=None, name=None, **kwargs): step_id_set.add(step.id) for id, variable_names in Self.gating_config_vars.items(): - if id not in step_id_set: - found = False - for step_id in step_id_set: - if fnmatch.fnmatch(step_id, id): - found = True - break - if not found: - raise TypeError( - f"Gated Step '{id}' does not match any Step in Flow '{Self.__qualname__}'" - ) + matching_steps = list(Filter([id]).filter(step_id_set)) + if id not in step_id_set and len(matching_steps) < 1: + raise TypeError( + f"Gated Step '{id}' does not match any Step in Flow '{Self.__qualname__}'" + ) for var_name in variable_names: if var_name not in variables_by_name: raise TypeError( @@ -234,9 +229,8 @@ def run( if key in step_ids.values(): gating_cvars_expanded[key] = value continue - for id in step_ids.values(): - if fnmatch.fnmatch(id, key): - gating_cvars_expanded[id] = value + for id in Filter([key]).filter(step_ids.values()): + gating_cvars_expanded[id] = value current_state = initial_state for cls in self.Steps: diff --git a/openlane/scripts/klayout/xml_drc_report_to_json.py b/openlane/scripts/klayout/xml_drc_report_to_json.py index bf2cdb726..5914209ee 100644 --- a/openlane/scripts/klayout/xml_drc_report_to_json.py +++ b/openlane/scripts/klayout/xml_drc_report_to_json.py @@ -38,6 +38,8 @@ def cli(xml_file, json_file): with open(json_file, "w", encoding="utf8") as f: json.dump(json_database, f) + print(f"%OL_METRIC_I klayout__drc_error__count {total}") + if __name__ == "__main__": cli() diff --git a/openlane/scripts/klayout/xor.drc b/openlane/scripts/klayout/xor.drc index 36b957a0f..3d02aa2aa 100755 --- a/openlane/scripts/klayout/xor.drc +++ b/openlane/scripts/klayout/xor.drc @@ -117,6 +117,4 @@ end info "---" info "Total XOR differences: #{total_xor_differences}" -puts "%OL_CREATE_REPORT difference_count.rpt" -puts total_xor_differences -puts "%OL_END_REPORT" \ No newline at end of file +puts "%OL_METRIC_I design__xor_difference__count #{total_xor_differences}" diff --git a/openlane/scripts/odbpy/disconnected_pins.py b/openlane/scripts/odbpy/disconnected_pins.py index 8072b685e..2057f614e 100644 --- a/openlane/scripts/odbpy/disconnected_pins.py +++ b/openlane/scripts/odbpy/disconnected_pins.py @@ -99,7 +99,7 @@ def main( print(f"Found {disconnected_pins_count} disconnected pin(s).") - utl.metric_integer("design__disconnected_pins__count", disconnected_pins_count) + utl.metric_integer("design__disconnected_pin__count", disconnected_pins_count) if __name__ == "__main__": diff --git a/openlane/scripts/odbpy/filter_unannotated.py b/openlane/scripts/odbpy/filter_unannotated.py index 601e359ea..76c3fa5d3 100644 --- a/openlane/scripts/odbpy/filter_unannotated.py +++ b/openlane/scripts/odbpy/filter_unannotated.py @@ -87,10 +87,10 @@ def main(reader, corner, checks_report): print("Filtered nets:") pprint.pprint(connected_nets) utl.metric_integer( - f"timing__unannotated_nets__count__corner:{corner}", len(reported_nets) + f"timing__unannotated_net__count__corner:{corner}", len(reported_nets) ) utl.metric_integer( - f"timing__unannotated_nets_filtered__count__corner:{corner}", + f"timing__unannotated_net_filtered__count__corner:{corner}", len(connected_nets), ) 
print("done") diff --git a/openlane/state/__main__.py b/openlane/state/__main__.py new file mode 100644 index 000000000..ef1748608 --- /dev/null +++ b/openlane/state/__main__.py @@ -0,0 +1,61 @@ +# Copyright 2023 Efabless Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import json +from typing import Optional + +import cloup + +from ..common import get_latest_file +from ..common.cli import formatter_settings + + +@cloup.group( + no_args_is_help=True, + formatter_settings=formatter_settings, +) +def cli(): + pass + + +@cloup.command() +@cloup.option( + "--extract-metrics-to", + default=None, +) +@cloup.argument("run_dir") +def latest(extract_metrics_to: Optional[str], run_dir: str): + exit_code = 0 + + if latest_state := get_latest_file(run_dir, "state_*.json"): + try: + state = json.load(open(latest_state, encoding="utf8")) + except json.JSONDecodeError as e: + print(f"Latest state at {latest_state} is invalid: {e}", file=sys.stderr) + exit(1) + metrics = state["metrics"] + print(latest_state, end="") + if output := extract_metrics_to: + json.dump(metrics, open(output, "w", encoding="utf8")) + else: + print("No state_*.json files found", file=sys.stderr) + exit_code = 1 + + exit(exit_code) + + +cli.add_command(latest) + +if __name__ == "__main__": + cli() diff --git a/openlane/steps/checker.py b/openlane/steps/checker.py index e7565b7a2..e3fed8408 100644 --- a/openlane/steps/checker.py +++ b/openlane/steps/checker.py @@ -141,7 +141,7 @@ class DisconnectedPins(MetricChecker): name = "Disconnected Pins Checker" deferred = False - metric_name = "design__disconnected_pins__count" + metric_name = "design__disconnected_pin__count" metric_description = "Disconnected pins count" @@ -178,7 +178,7 @@ class LVS(MetricChecker): name = "LVS Error Checker" long_name = "Layout vs. Schematic Error Checker" - metric_name = "design__lvs_errors__count" + metric_name = "design__lvs_error__count" metric_description = "LVS errors" @@ -198,7 +198,7 @@ class LintErrors(MetricChecker): long_name = "Lint Errors Checker" deferred = False - metric_name = "design__lint_errors__count" + metric_name = "design__lint_error__count" metric_description = "Lint errors" @@ -209,7 +209,7 @@ class LintWarnings(MetricChecker): long_name = "Lint Warnings Checker" deferred = False - metric_name = "design__lint_warnings__count" + metric_name = "design__lint_warning__count" metric_description = "Lint warnings" @@ -220,7 +220,7 @@ class LintTimingConstructs(MetricChecker): long_name = "Lint Timing Errors Checker" deferred = False - metric_name = "design__lint_timing_constructs__count" + metric_name = "design__lint_timing_construct__count" metric_description = "Lint Timing Errors" def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: diff --git a/openlane/steps/klayout.py b/openlane/steps/klayout.py index a84499876..582088e0e 100644 --- a/openlane/steps/klayout.py +++ b/openlane/steps/klayout.py @@ -11,51 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import json import os import shlex import sys import site import shutil import subprocess +from os.path import abspath from base64 import b64encode from typing import Any, Dict, Optional, List, Sequence, Tuple, Union from .step import ViewsUpdate, MetricsUpdate, Step, StepError, StepException +from ..config import Variable from ..logging import info, warn -from ..config import Variable, Config from ..state import DesignFormat, State -from ..common import Path, get_script_dir, Toolbox, mkdirp - - -def get_lef_args(config: Config, toolbox: Toolbox) -> List[str]: - tech_lefs = toolbox.filter_views(config, config["TECH_LEFS"]) - if len(tech_lefs) != 1: - raise StepException( - "Misconfigured SCL: 'TECH_LEFS' must return exactly one Tech LEF for its default timing corner." - ) - - lef_args = [ - "--input-lef", - str(tech_lefs[0]), - ] - - for lef in config["CELL_LEFS"]: - lef_args.append("--input-lef") - lef_args.append(str(lef)) - - macro_lefs = toolbox.get_macro_views(config, DesignFormat.LEF) - for lef in macro_lefs: - lef_args.append("--input-lef") - lef_args.append(str(lef)) - - if extra_lefs := config["EXTRA_LEFS"]: - for lef in extra_lefs: - lef_args.append("--input-lef") - lef_args.append(str(lef)) - - return lef_args +from ..common import Path, get_script_dir class KLayoutStep(Step): @@ -98,6 +69,68 @@ def run_subprocess( env["PYTHONPATH"] = ":".join(python_path_elements) return super().run_subprocess(cmd, log_to, silent, report_dir, env, **kwargs) + def get_cli_args( + self, + *, + layer_info: bool = True, + include_lefs: bool = False, + include_gds: bool = False, + ) -> List[str]: + result = [] + if layer_info: + lyp = self.config["KLAYOUT_PROPERTIES"] + lyt = self.config["KLAYOUT_TECH"] + lym = self.config["KLAYOUT_DEF_LAYER_MAP"] + if None in [lyp, lyt, lym]: + raise StepError( + "Cannot open design in KLayout as the PDK does not appear to support KLayout." + ) + result += ["--lyp", abspath(lyp), "--lyt", abspath(lyt), "--lym", abspath(lym)] + + if include_lefs: + tech_lefs = self.toolbox.filter_views(self.config, self.config["TECH_LEFS"]) + if len(tech_lefs) != 1: + raise StepException( + "Misconfigured SCL: 'TECH_LEFS' must return exactly one Tech LEF for its default timing corner."
+ ) + + lef_args = [ + "--input-lef", + abspath(tech_lefs[0]), + ] + + for lef in self.config["CELL_LEFS"]: + lef_args.append("--input-lef") + lef_args.append(abspath(lef)) + + macro_lefs = self.toolbox.get_macro_views(self.config, DesignFormat.LEF) + for lef in macro_lefs: + lef_args.append("--input-lef") + lef_args.append(abspath(lef)) + + if extra_lefs := self.config["EXTRA_LEFS"]: + for lef in extra_lefs: + lef_args.append("--input-lef") + lef_args.append(abspath(lef)) + + result += lef_args + + if include_gds: + gds_args = [] + for gds in self.config["CELL_GDS"]: + gds_args.append("--with-gds-file") + gds_args.append(gds) + for gds in self.toolbox.get_macro_views(self.config, DesignFormat.GDS): + gds_args.append("--with-gds-file") + gds_args.append(gds) + if extra_gds := self.config["EXTRA_GDS_FILES"]: + for gds in extra_gds: + gds_args.append("--with-gds-file") + gds_args.append(gds) + result += gds_args + + return result + @Step.factory.register() class Render(KLayoutStep): @@ -119,38 +152,17 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: if gds := state_in[DesignFormat.GDS]: input_view = gds - lyp = self.config["KLAYOUT_PROPERTIES"] - lyt = self.config["KLAYOUT_TECH"] - lym = self.config["KLAYOUT_DEF_LAYER_MAP"] - - tech_lefs = self.toolbox.filter_views(self.config, self.config["TECH_LEFS"]) - if len(tech_lefs) != 1: - raise StepError( - "Misconfigured SCL: 'TECH_LEFS' must return exactly one Tech LEF for its default timing corner." - ) - - lef_arguments = ["-l", str(tech_lefs[0])] - for file in self.config["CELL_LEFS"]: - lef_arguments += ["-l", str(file)] - if extra := self.config["EXTRA_LEFS"]: - for file in extra: - lef_arguments += ["-l", str(file)] + assert isinstance(input_view, Path) self.run_subprocess( [ sys.executable, os.path.join(get_script_dir(), "klayout", "render.py"), - input_view, + abspath(input_view), "--output", - os.path.join(self.step_dir, "out.png"), - "--lyp", - lyp, - "--lyt", - lyt, - "--lym", - lym, + abspath(os.path.join(self.step_dir, "out.png")), ] - + lef_arguments, + + self.get_cli_args(include_lefs=True), silent=True, ) @@ -177,40 +189,12 @@ class StreamOut(KLayoutStep): outputs = [DesignFormat.GDS, DesignFormat.KLAYOUT_GDS] def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: - lyp = self.config["KLAYOUT_PROPERTIES"] - lyt = self.config["KLAYOUT_TECH"] - lym = self.config["KLAYOUT_DEF_LAYER_MAP"] - if None in [lyp, lyt, lym]: - if self.config["PRIMARY_GDSII_STREAMOUT_TOOL"] == "klayout": - raise StepError( - "One of KLAYOUT_PROPERTIES, KLAYOUT_TECH or KLAYOUT_DEF_LAYER_MAP is unset, yet, KLayout is set as the primary sign-off tool." - ) - warn( - "One of KLAYOUT_PROPERTIES, KLAYOUT_TECH or KLAYOUT_DEF_LAYER_MAP is unset. 
Returning state unaltered…" - ) - return {}, {} - views_updates: ViewsUpdate = {} klayout_gds_out = os.path.join( self.step_dir, f"{self.config['DESIGN_NAME']}.{DesignFormat.KLAYOUT_GDS.value.extension}", ) - - layout_args = [] - layout_args += get_lef_args(self.config, self.toolbox) - - for gds in self.config["CELL_GDS"]: - layout_args.append("--with-gds-file") - layout_args.append(gds) - for gds in self.toolbox.get_macro_views(self.config, DesignFormat.GDS): - layout_args.append("--with-gds-file") - layout_args.append(gds) - if extra_gds := self.config["EXTRA_GDS_FILES"]: - for gds in extra_gds: - layout_args.append("--with-gds-file") - layout_args.append(gds) - kwargs, env = self.extract_env(kwargs) self.run_subprocess( @@ -223,17 +207,11 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: ), state_in[DesignFormat.DEF.value.id], "--output", - klayout_gds_out, - "--lyt", - lyt, - "--lyp", - lyp, - "--lym", - lym, + abspath(klayout_gds_out), "--top", self.config["DESIGN_NAME"], ] - + layout_args, + + self.get_cli_args(include_lefs=True, include_gds=True), env=env, ) @@ -309,6 +287,9 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: warn("No KLayout stream-out has been performed. Skipping XOR process…") return {}, {} + assert isinstance(layout_a, Path) + assert isinstance(layout_b, Path) + kwargs, env = self.extract_env(kwargs) tile_size_options = [] @@ -318,7 +299,7 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: thread_count = self.config["KLAYOUT_XOR_THREADS"] or os.cpu_count() or 1 info(f"Running XOR with {thread_count} threads…") - self.run_subprocess( + metric_updates = self.run_subprocess( [ "ruby", os.path.join( @@ -327,25 +308,21 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: "xor.drc", ), "--output", - os.path.join(self.step_dir, "xor.xml"), + abspath(os.path.join(self.step_dir, "xor.xml")), "--top", self.config["DESIGN_NAME"], "--threads", thread_count, "--ignore", ignored, - layout_a, - layout_b, + abspath(layout_a), + abspath(layout_b), ] + tile_size_options, env=env, ) - difference_count = int( - open(os.path.join(self.step_dir, "difference_count.rpt")).read().strip() - ) - - return {}, {"design__xor_difference__count": difference_count} + return {}, metric_updates @Step.factory.register() @@ -369,7 +346,7 @@ class DRC(KLayoutStep): Variable( "KLAYOUT_DRC_OPTIONS", Optional[Dict[str, Union[bool, int]]], - "Options availble to KLayout DRC runset. They vary from one PDK to another.", + "Options passed directly to the KLayout DRC runset. 
They vary from one PDK to another.", pdk=True, ), Variable( @@ -381,14 +358,10 @@ class DRC(KLayoutStep): ] def run_sky130(self, state_in: State, **kwargs) -> MetricsUpdate: - reports_folder = os.path.join(self.step_dir, "reports") - mkdirp(reports_folder) - - metrics_updates: MetricsUpdate = {} - drc_script_path = self.config["KLAYOUT_DRC_RUNSET"] kwargs, env = self.extract_env(kwargs) - xml_report = os.path.realpath(os.path.join(reports_folder, "violations.xml")) - json_report = os.path.realpath(os.path.join(reports_folder, "violations.json")) + drc_script_path = self.config["KLAYOUT_DRC_RUNSET"] + xml_report = os.path.join(self.step_dir, "violations.xml") + json_report = os.path.join(self.step_dir, "violations.json") feol = str(self.config["KLAYOUT_DRC_OPTIONS"]["feol"]).lower() beol = str(self.config["KLAYOUT_DRC_OPTIONS"]["beol"]).lower() floating_metal = str( @@ -399,6 +372,9 @@ def run_sky130(self, state_in: State, **kwargs) -> MetricsUpdate: threads = self.config["KLAYOUT_DRC_THREADS"] or (str(os.cpu_count()) or "1") info(f"Running KLayout DRC with {threads} threads…") + input_view = state_in[DesignFormat.GDS] + assert isinstance(input_view, Path) + self.run_subprocess( [ "klayout", @@ -407,11 +383,11 @@ def run_sky130(self, state_in: State, **kwargs) -> MetricsUpdate: "-r", drc_script_path, "-rd", - f"input={str(state_in[DesignFormat.GDS])}", + f"input={abspath(input_view)}", "-rd", - f"topcell={str(self.config['DESIGN_NAME'])}", + f"topcell={self.config['DESIGN_NAME']}", "-rd", - f"report={xml_report}", + f"report={abspath(xml_report)}", "-rd", f"feol={feol}", "-rd", @@ -427,7 +403,8 @@ def run_sky130(self, state_in: State, **kwargs) -> MetricsUpdate: ], env=env, ) - self.run_subprocess( + + return self.run_subprocess( [ "python3", os.path.join( @@ -435,17 +412,13 @@ def run_sky130(self, state_in: State, **kwargs) -> MetricsUpdate: "klayout", "xml_drc_report_to_json.py", ), - f"--xml-file={xml_report}", - f"--json-file={json_report}", + f"--xml-file={abspath(xml_report)}", + f"--json-file={abspath(json_report)}", ], env=env, log_to=os.path.join(self.step_dir, "xml_drc_report_to_json.log"), ) - with open(json_report, "r") as f: - metrics_updates["klayout__drc_error__count"] = json.load(f)["total"] - return metrics_updates - def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: metrics_updates: MetricsUpdate = {} if self.config["PDK"] in ["sky130A", "sky130B"]: @@ -487,17 +460,7 @@ class OpenGUI(KLayoutStep): ] def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: - lyp = self.config["KLAYOUT_PROPERTIES"] - lyt = self.config["KLAYOUT_TECH"] - lym = self.config["KLAYOUT_DEF_LAYER_MAP"] - if None in [lyp, lyt, lym]: - raise StepError( - "Cannot open design in KLayout as the PDK does not appear to support KLayout." 
- ) - - lefs = get_lef_args(self.config, self.toolbox) kwargs, env = self.extract_env(kwargs) - mode_args = [] if self.config["KLAYOUT_EDITOR_MODE"]: mode_args.append("--editor") @@ -506,18 +469,13 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: if self.config["KLAYOUT_PRIORITIZE_GDS"]: if gds := state_in[DesignFormat.GDS]: layout = gds + assert isinstance(layout, Path) env["KLAYOUT_ARGV"] = shlex.join( [ - "--lyt", - str(lyt), - "--lyp", - str(lyp), - "--lym", - str(lym), - str(layout), + abspath(layout), ] - + lefs + + self.get_cli_args(include_lefs=True) ) cmd = ( diff --git a/openlane/steps/magic.py b/openlane/steps/magic.py index 5467a3846..3ab1631e7 100644 --- a/openlane/steps/magic.py +++ b/openlane/steps/magic.py @@ -108,7 +108,7 @@ def get_command(self) -> List[str]: "-dnull", "-noconsole", "-rcfile", - str(self.config["MAGICRC"]), + os.path.abspath(self.config["MAGICRC"]), os.path.join(get_script_dir(), "magic", "wrapper.tcl"), ] diff --git a/openlane/steps/netgen.py b/openlane/steps/netgen.py index a678a456a..ff5ebc170 100644 --- a/openlane/steps/netgen.py +++ b/openlane/steps/netgen.py @@ -85,12 +85,12 @@ def flatten(list): ) metrics = {} metrics["design__lvs_device_difference__count"] = device_differences - metrics["design__lvs_net_differences__count"] = net_differences - metrics["design__lvs_property_fails__count"] = property_fails - metrics["design__lvs_errors__count"] = total_errors - metrics["design__lvs_unmatched_devices__count"] = device_fails - metrics["design__lvs_unmatched_nets__count"] = net_fails - metrics["design__lvs_unmatched_pins__count"] = pin_fails + metrics["design__lvs_net_difference__count"] = net_differences + metrics["design__lvs_property_fail__count"] = property_fails + metrics["design__lvs_error__count"] = total_errors + metrics["design__lvs_unmatched_device__count"] = device_fails + metrics["design__lvs_unmatched_net__count"] = net_fails + metrics["design__lvs_unmatched_pin__count"] = pin_fails return metrics diff --git a/openlane/steps/openroad.py b/openlane/steps/openroad.py index ef61d99ce..9518ff7b3 100644 --- a/openlane/steps/openroad.py +++ b/openlane/steps/openroad.py @@ -27,9 +27,6 @@ from abc import abstractmethod from concurrent.futures import Future from typing import ( - Any, - Callable, - Iterable, List, Dict, Literal, @@ -81,28 +78,6 @@ met5 Y 1.70 3.40 """ -timing_metric_aggregation: Dict[str, Tuple[Any, Callable[[Iterable], Any]]] = { - "timing__hold_vio__count": (0, lambda x: sum(x)), - "timing__hold_r2r_vio__count": (0, lambda x: sum(x)), - "timing__setup_vio__count": (0, lambda x: sum(x)), - "timing__setup_r2r_vio__count": (0, lambda x: sum(x)), - "design__max_slew_violation__count": (0, lambda x: sum(x)), - "design__max_fanout_violation__count": (0, lambda x: sum(x)), - "design__max_cap_violation__count": (0, lambda x: sum(x)), - "clock__skew__worst_hold": (-inf, max), - "clock__skew__worst_setup": (-inf, max), - "timing__hold__ws": (inf, min), - "timing__hold_r2r__ws": (inf, min), - "timing__setup__ws": (inf, min), - "timing__setup_r2r__ws": (inf, min), - "timing__hold__wns": (inf, min), - "timing__setup__wns": (inf, min), - "timing__hold__tns": (0, lambda x: sum(x)), - "timing__setup__tns": (0, lambda x: sum(x)), - "timing__unannotated_nets__count": (0, max), - "timing__unannotated_nets_filtered__count": (0, max), -} - def old_to_new_tracks(old_tracks: str) -> str: """ @@ -280,10 +255,7 @@ def run(self, state_in, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: or_metrics_out[key] = -inf 
metrics_updates.update(or_metrics_out) - metric_updates_with_aggregates = aggregate_metrics( - metrics_updates, - timing_metric_aggregation, - ) + metric_updates_with_aggregates = aggregate_metrics(metrics_updates) return views_updates, metric_updates_with_aggregates @@ -566,9 +538,7 @@ def run_corner(corner: str): for corner, updates_future in futures.items(): metrics_updates.update(updates_future.result()) - metric_updates_with_aggregates = aggregate_metrics( - metrics_updates, timing_metric_aggregation - ) + metric_updates_with_aggregates = aggregate_metrics(metrics_updates) def format_count(count: Optional[Union[int, float, Decimal]]) -> str: if count is None: @@ -1073,7 +1043,7 @@ class CheckAntennas(OpenROADStep): Runs OpenROAD to check if one or more long nets may constitute an `antenna risk <https://en.wikipedia.org/wiki/Antenna_effect>`_. - The metric ``route__antenna_violations__count`` will be updated with the number of violating nets. + The metric ``route__antenna_violation__count`` will be updated with the number of violating nets. """ id = "OpenROAD.CheckAntennas" @@ -1186,7 +1156,7 @@ def __lt__(self, other): def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: views_updates, metrics_updates = super().run(state_in, **kwargs) - metrics_updates["route__antenna_violations__count"] = get_antenna_nets( + metrics_updates["route__antenna_violation__count"] = get_antenna_nets( open(os.path.join(self.step_dir, "antenna.rpt")), open(os.path.join(self.step_dir, "antenna_net_list.txt"), "w"), ) @@ -1208,10 +1178,10 @@ class GlobalRouting(CheckAntennas): Estimated capacitance and resistance values are much more accurate for global routing. - Updates the ``route__antenna_violations__count`` metric. + Updates the ``route__antenna_violation__count`` metric. At this stage, `antenna effect <https://en.wikipedia.org/wiki/Antenna_effect>`_ - mitigations may also be applied, updating the `route__antenna_violations__count` count. + mitigations may also be applied, updating the ``route__antenna_violation__count`` metric. See the variables for more info. """ diff --git a/openlane/steps/step.py b/openlane/steps/step.py index f361618c4..c07a450af 100644 --- a/openlane/steps/step.py +++ b/openlane/steps/step.py @@ -834,10 +834,10 @@ def start( :param toolbox: The flow's :class:`Toolbox` object, required. - If running in interactive mode, you may omit this argument as ``None``, + If running in interactive mode, you may omit this argument as ``None``\\, where a global toolbox will be used instead. - If running inside a flow, you may also omit this argument as ``None``, + If running inside a flow, you may also omit this argument as ``None``\\, where the flow's toolbox will be used instead. :param **kwargs: Passed on to subprocess execution: useful if you want to @@ -935,7 +935,7 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: The "core" of a step. This method is considered per-object private, i.e., if a Step's run is - called anywhere outside of the same object's :meth:`start`, its behavior + called anywhere outside of the same object's :meth:`start`\\, its behavior is undefined. :param state_in: The input state. resolved before use otherwise. For reference, ``start()`` is responsible for resolving it - for ``.run()``. + for ``.run()``\\. :param **kwargs: Passed on to subprocess execution: useful if you want to redirect stdin, stdout, etc.
@@ -978,27 +978,27 @@ def run_subprocess( makes a number of special functions accessible to subprocesses by simply printing keywords in the terminal, such as: - * ``%OL_CREATE_REPORT <name>``: Starts redirecting all output from + * ``%OL_CREATE_REPORT <name>``\\: Starts redirecting all output from standard output to a report file inside the step directory, with the name <name>. * ``%OL_END_REPORT``: Stops redirection behavior. - * ``%OL_METRIC <name> <value>``: Adds a string metric with the name - <name> and the value <value> to this function's returned object. - * ``%OL_METRIC_F <name> <value>``: Adds a floating-point metric with the - name <name> and the value <value> to this function's returned object. - * ``%OL_METRIC_I <name> <value>``: Adds an integer metric with the name - <name> and the value <value> to this function's returned object. + * ``%OL_METRIC <name> <value>``\\: Adds a string metric with the name + <name> and the value <value> to this function's returned object. + * ``%OL_METRIC_F <name> <value>``\\: Adds a floating-point metric with the + name <name> and the value <value> to this function's returned object. + * ``%OL_METRIC_I <name> <value>``\\: Adds an integer metric with the name + <name> and the value <value> to this function's returned object. :param cmd: A list of variables, representing a program and its arguments, similar to how you would use it in a shell. :param log_to: An optional override for the log path from - :meth:`get_log_path`. Useful for if you run multiple subprocesses + :meth:`get_log_path`\\. Useful if you run multiple subprocesses within one step. :param silent: If specified, the subprocess does not print anything to the terminal. Useful when running multiple processes simultaneously. :param **kwargs: Passed on to subprocess execution: useful if you want to redirect stdin, stdout, etc. - :returns: A dictionary of any metrics generated using the %OL_METRIC{,_I,_F} + :returns: A dictionary of any metrics generated using the ``%OL_METRIC{,_I,_F}`` directive. :raises subprocess.CalledProcessError: If the process has a non-zero exit, this exception will be raised. diff --git a/openlane/steps/tclstep.py b/openlane/steps/tclstep.py index 86d60694f..410a9759f 100644 --- a/openlane/steps/tclstep.py +++ b/openlane/steps/tclstep.py @@ -178,7 +178,7 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: return super().run(state_in, env=env, **kwargs) This will allow you to add further custom environment variables to a call - while still respecting an `env` argument further up the call-stack. + while still respecting an ``env`` argument further up the call-stack. :param state_in: See superclass.
:param **kwargs: Passed on to subprocess execution: useful if you want to diff --git a/openlane/steps/verilator.py b/openlane/steps/verilator.py index 8c9dc397f..aa7aa6226 100644 --- a/openlane/steps/verilator.py +++ b/openlane/steps/verilator.py @@ -166,11 +166,11 @@ def run(self, state_in: State, **kwargs) -> Tuple[ViewsUpdate, MetricsUpdate]: if exit_error is not None and errors_count == 0: raise StepException(f"Verilator exited unexpectedly: {exit_error}") - metrics_updates.update({"design__lint_errors__count": errors_count}) + metrics_updates.update({"design__lint_error__count": errors_count}) metrics_updates.update( - {"design__lint_timing_constructs__count": timing_constructs} + {"design__lint_timing_construct__count": timing_constructs} ) - metrics_updates.update({"design__lint_warnings__count": warnings_count}) + metrics_updates.update({"design__lint_warning__count": warnings_count}) metrics_updates.update({"design__inferred_latch__count": latch_count}) return views_updates, metrics_updates diff --git a/requirements.txt b/requirements.txt index 6a548c176..4d2efed96 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,12 +2,12 @@ click>=8,<9 cloup>=1.0.1,<2 pyyaml>=5,<7 rich>=12,<13 -requests>=2.27,<3 volare>=0.16.0 lxml>=4.9.0 deprecated>=1.2.10,<2 immutabledict>=2,<3 libparse>=0.3.1,<1 psutil>=5.9.0 +httpx>=0.22.0, <0.28 ioplace_parser~=0.1.0 klayout==0.28.15 diff --git a/requirements_dev.txt b/requirements_dev.txt index 29bfd16a4..c61d2a494 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -20,13 +20,13 @@ types-docutils types-decorator types-commonmark types-colorama -types-requests types-Pygments types-Deprecated types-psutil # test pytest +pytest-xdist coverage pyfakefs>=5.2.3,<6 pillow>=10.0.1,<11 diff --git a/test/common/test_common.py b/test/common/test_common.py index 4d15d7e09..27b919205 100644 --- a/test/common/test_common.py +++ b/test/common/test_common.py @@ -49,7 +49,7 @@ def test_parse_metric_modifiers(): ), "Improperly parsed metric without modifiers" assert parse_metric_modifiers( - "category__name__optional_name_modifier__etc__mod1:one:mod2:two" + "category__name__optional_name_modifier__etc__mod1:one__mod2:two" ) == ( "category__name__optional_name_modifier__etc", {"mod1": "one", "mod2": "two"}, @@ -58,9 +58,9 @@ def test_parse_metric_modifiers(): assert parse_metric_modifiers( "category__name__optional_name_modifier__etc:etc:etc", ) == ( - "category__name__optional_name_modifier__etc:etc:etc", - {}, - ), "Improperly parsed metric with improper modifier syntax" + "category__name__optional_name_modifier", + {"etc": "etc:etc"}, + ), "Improperly parsed metric with modifier containing a colon" @pytest.mark.parametrize( diff --git a/test/common/test_misc_utils.py b/test/common/test_misc_utils.py index 09350b150..a0d95bde8 100644 --- a/test/common/test_misc_utils.py +++ b/test/common/test_misc_utils.py @@ -128,3 +128,35 @@ def test_klayout_xml(): pytest.fail(f"Unexpected error while attempting to parse generated XML: {e}") assert parsed.find(".//categories/category[1]/name").text == "LU.3" + + +def test_filter_filter(): + from openlane.common import Filter + + assert ( + list(Filter([]).filter(["a", "b", "c"])) == [] + ), "filter with no wildcards matches nothing" + + assert ( + list(Filter(["*", "!b"]).filter(["b"])) == [] + ), "filter with deny wildcard did not work properly" + + assert list(Filter(["*", "!b"]).filter(["b", "be"])) == [ + "be" + ], "filter with deny wildcard matched too many elements" + + assert list( + Filter(["boing*", 
"!boinger", "boinge*"]).filter(["boingee", "boinger"]) + ) == ["boingee"], "filter with a mixture of wildcards failed" + + +def test_filter_all_matching(): + from openlane.common import Filter + + assert list(Filter(["k", "!b"]).get_matching_wildcards("c")) == [ + "b" + ], "filter did not accurately return rejecting wildcard" + + assert list(Filter(["*", "!c"]).get_matching_wildcards("c")) == [ + "*", + ], "filter did not accurately return accepting wildcard" diff --git a/test/config/test_preprocessor.py b/test/config/test_preprocessor.py index dc027c745..f0e760a07 100644 --- a/test/config/test_preprocessor.py +++ b/test/config/test_preprocessor.py @@ -89,9 +89,12 @@ def test_process_string(): ["/cwd"], ) - assert ( - process_string("refg::$A", {"A": "B"}, ["/cwd"]) == [] - ), "refg:: on non-existent directory not working" + assert process_string("refg::$A/*", {"A": "B"}, ["/cwd"]) == [ + "B/*" + ], "refg:: on non-existent directory not working" + assert process_string("refg::$A", {"A": "B"}, ["/cwd"]) == [ + "B" + ], "refg:: without asterisks or ? did not return the same file path" mmpt_raw = { diff --git a/test/config/test_variable.py b/test/config/test_variable.py index 3de9e1d9a..a64efa37f 100644 --- a/test/config/test_variable.py +++ b/test/config/test_variable.py @@ -42,6 +42,60 @@ def test_macro_validation(): Macro(gds=["test"], lef=["test"], lefs=[]) +def test_macro_from_state(): + from openlane.common import Path + from openlane.config import Macro + from openlane.state import State + + state_in = State( + { + "nl": Path._dummy_path, + "pnl": Path._dummy_path, + "def": Path._dummy_path, + "odb": Path._dummy_path, + "sdf": { + "corner_1": Path._dummy_path, + "corner_2": Path._dummy_path, + }, + "spef": { + "corner_*": Path._dummy_path, + }, + "lib": { + "corner_1": Path._dummy_path, + "corner_2": Path._dummy_path, + }, + "gds": Path._dummy_path, + }, + metrics={}, + ) + + with pytest.raises( + ValueError, + match="Macro cannot be made out of input state: View lef is missing", + ): + Macro.from_state(state_in) + + state_fixed = State(state_in, overrides={"lef": Path._dummy_path}) + macro = Macro.from_state(state_fixed) + assert macro == Macro( + gds=[Path("__openlane_dummy_path")], + lef=[Path("__openlane_dummy_path")], + instances={}, + nl=[Path("__openlane_dummy_path")], + spef={"corner_*": [Path("__openlane_dummy_path")]}, + lib={ + "corner_1": [Path("__openlane_dummy_path")], + "corner_2": [Path("__openlane_dummy_path")], + }, + spice=[], + sdf={ + "corner_1": [Path("__openlane_dummy_path")], + "corner_2": [Path("__openlane_dummy_path")], + }, + json_h=None, + ), "Macro was not derived from State correctly" + + def test_is_optional(): from openlane.config.variable import is_optional diff --git a/test/steps/all b/test/steps/all index 4283de670..cedaedd09 160000 --- a/test/steps/all +++ b/test/steps/all @@ -1 +1 @@ -Subproject commit 4283de670621a210d086cd56f77502ea2c8b99c4 +Subproject commit cedaedd092cdc07a5f2d74a36b4e499d0df26c41