diff --git a/rules/klayout/drc/testing/Makefile b/rules/klayout/drc/testing/Makefile
index d7b51cb0..0ab6e8f7 100644
--- a/rules/klayout/drc/testing/Makefile
+++ b/rules/klayout/drc/testing/Makefile
@@ -78,73 +78,14 @@ test-DRC-gf180mcu_fd_sc_mcu9t5v0: print_klayout_version
 #=================================
 
 .ONESHELL:
-test-DRC-main : print_klayout_version test-DRC-foundry-Option-1 test-DRC-foundry-Option-2 test-DRC-foundry-Option-3 test-DRC-foundry-Option-4 test-DRC-Option-A test-DRC-Option-B test-DRC-Option-C
+test-DRC-main : print_klayout_version
+	@python3 run_regression.py
 	@echo "========== DRC-Regression is done =========="
 
 .ONESHELL:
-test-DRC-foundry-Option-1:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression foundry-Option-1 =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_6kA_MIMA_SOLDER_BUMP.gds.gz \
-		--metal_top="6K" --mim_option="A" --metal_level=6 --run_name="DRC-foundry-Option-1"
-
-.ONESHELL:
-test-DRC-foundry-Option-2:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression foundry-Option-2 =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_9kA_MIMB_WEDGE.gds.gz \
-		--metal_top="9K" --mim_option="B" --metal_level=6 --run_name="DRC-foundry-Option-2"
-
-.ONESHELL:
-test-DRC-foundry-Option-3:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression foundry-Option-3 =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_11kA_MIMA_Gold_Bump.gds.gz \
-		--metal_top="11K" --mim_option="A" --metal_level=6 --run_name="DRC-foundry-Option-3"
-
-.ONESHELL:
-test-DRC-foundry-Option-4:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression foundry-Option-4 =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_30kA_MIMB_BALL.gds.gz \
-		--metal_top="30K" --mim_option="B" --metal_level=6 --run_name="DRC-foundry-Option-4"
-
-.ONESHELL:
-test-DRC-Option-A:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression Option-A =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_11kA_MIMA_Gold_Bump.gds.gz \
-		--metal_top="30K" --mim_option="A" --metal_level=3 --run_name="DRC-Option-A"
-
-.ONESHELL:
-test-DRC-Option-B:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression Option-B =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_11kA_MIMA_Gold_Bump.gds.gz \
-		--metal_top="11K" --mim_option="B" --metal_level=4 --run_name="DRC-Option-B"
-
-.ONESHELL:
-test-DRC-Option-C:
-	@cd $(Testing_DIR)
-	@echo "========== DRC-Regression Option-C =========="
-	@python3 run_regression.py \
-		--path=testcases/0.0.DM000013_13_1P6M_11kA_MIMA_Gold_Bump.gds.gz \
-		--path=testcases/Manual_testcases.gds \
-		--path=testcases/density_testcases/Mn_4_fail.gds \
-		--path=testcases/density_testcases/Mn_4_pass.gds \
-		--path=testcases/density_testcases/MT30_7_fail.gds \
-		--path=testcases/density_testcases/MT30_7_pass.gds \
-		--path=testcases/density_testcases/MT_3_fail.gds \
-		--path=testcases/density_testcases/MT_3_pass.gds \
-		--path=testcases/density_testcases/PL_8_fail.gds \
-		--path=testcases/density_testcases/PL_8_pass.gds \
-		--metal_top="9K" --mim_option="B" --metal_level=5 --run_name="DRC-Option-C"
+test-DRC-% : print_klayout_version
+	@python3 run_regression.py --table=$*
+	@echo "========== DRC-Regression is done =========="
 
 #=================================
 # -------- test-DRC-switch -------
@@ -182,10 +123,5 @@ help:
 	@echo "... test-DRC-gf180mcu_fd_sc_mcu7t5v0 (To run standard cells 7 tracks DRC regression )"
 	@echo "... test-DRC-gf180mcu_fd_sc_mcu9t5v0 (To run standard cells 9 tracks DRC regression )"
 	@echo "... test-DRC-main (To run main DRC full regression )"
-	@echo "... test-DRC-foundry-Option-1 (To run main DRC regression using foundry-Option-1)"
-	@echo "... test-DRC-foundry-Option-2 (To run main DRC regression using foundry-Option-2)"
-	@echo "... test-DRC-foundry-Option-3 (To run main DRC regression using foundry-Option-3)"
-	@echo "... test-DRC-foundry-Option-4 (To run main DRC regression using foundry-Option-4)"
-	@echo "... test-DRC-Option-A (To run main DRC regression using Option-A )"
-	@echo "... test-DRC-Option-B (To run main DRC regression using Option-B )"
-	@echo "... test-DRC-Option-C (To run main DRC regression using Option-C )"
+	@echo "... test-DRC-[table_name] (To run main DRC regression on a specific table )"
+
diff --git a/rules/klayout/drc/testing/run_regression.py b/rules/klayout/drc/testing/run_regression.py
index 344d8b3b..d3c8eddc 100644
--- a/rules/klayout/drc/testing/run_regression.py
+++ b/rules/klayout/drc/testing/run_regression.py
@@ -28,24 +28,93 @@
 """
 from subprocess import check_call
+from subprocess import Popen, PIPE
 import concurrent.futures
 import traceback
-
-import re
+import yaml
+import shutil
 from docopt import docopt
 import os
-import datetime
+from datetime import datetime
 import xml.etree.ElementTree as ET
 import time
 import pandas as pd
 import logging
 import glob
 from pathlib import Path
+from tqdm import tqdm
+import re
+import gdstk
+
 
 SUPPORTED_TC_EXT = "gds"
+SUPPORTED_SW_EXT = "yaml"
+
+
+def check_klayout_version():
+    """
+    check_klayout_version checks the klayout version and makes sure it would work with the DRC.
+    """
+    # ======= Checking Klayout version =======
+    klayout_v_ = os.popen("klayout -b -v").read()
+    klayout_v_ = klayout_v_.split("\n")[0]
+    klayout_v_list = []
+
+    if klayout_v_ == "":
+        logging.error("Klayout is not found. Please make sure klayout is installed.")
+        exit(1)
+    else:
+        klayout_v_list = [int(v) for v in klayout_v_.split(" ")[-1].split(".")]
+
+    logging.info(f"Your Klayout version is: {klayout_v_}")
+
+    if len(klayout_v_list) < 1 or len(klayout_v_list) > 3:
+        logging.error("Was not able to get klayout version properly.")
+        exit(1)
+    elif len(klayout_v_list) == 2:
+        if klayout_v_list[1] < 28:
+            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
+            logging.error(
+                "Using this klayout version has not been assessed in this development. Limits are unknown"
+            )
+            exit(1)
+    elif len(klayout_v_list) == 3:
+        if klayout_v_list[1] < 28:
+            logging.warning("Prerequisites at a minimum: KLayout 0.28.0")
+            logging.error(
+                "Using this klayout version has not been assessed in this development. Limits are unknown"
+            )
+            exit(1)
+
+
+def get_switches(yaml_file, rule_name):
+    """Parse yaml file and extract switches data
+
+    Parameters
+    ----------
+    yaml_file : str
+        yaml config file path given by the user.
+
+    Returns
+    -------
+    yaml_dic : dictionary
+        dictionary containing switches data.
+    """
+
+    # load yaml config data
+    with open(yaml_file, 'r') as stream:
+        try:
+            yaml_dic = yaml.safe_load(stream)
+        except yaml.YAMLError as exc:
+            print(exc)
+
+    switches = list()
+    for param, value in yaml_dic[rule_name].items():
+        switch = f"{param}={value}"
+        switches.append(switch)
 
-def parse_results_db(results_database):
+    return switches
+
+
+def parse_results_db(test_rule, results_database):
     """
     This function will parse Klayout database for analysis.
@@ -53,7 +122,7 @@ def parse_results_db(results_database):
     ----------
     results_database : string or Path object
         Path string to the results file
-    
+
     Returns
     -------
     set
@@ -62,13 +131,24 @@ def parse_results_db(results_database):
     mytree = ET.parse(results_database)
     myroot = mytree.getroot()
 
+    # Initial values for counters
+    pass_patterns = 0
+    fail_patterns = 0
+    falsePos = 0
+    falseNeg = 0
 
-    all_violating_rules = set()
-    for z in myroot[7]:  # myroot[7] : List rules with viloations
-        all_violating_rules.add(f"{z[1].text}".replace("'", ""))
+    for z in myroot[7]:
+        if f"'{test_rule}_pass_patterns'" == f"{z[1].text}":
+            pass_patterns += 1
+        if f"'{test_rule}_fail_patterns'" == f"{z[1].text}":
+            fail_patterns += 1
+        if f"'{test_rule}_false_positive'" == f"{z[1].text}":
+            falsePos += 1
+        if f"'{test_rule}_false_negative'" == f"{z[1].text}":
+            falseNeg += 1
 
-    return all_violating_rules
+    return pass_patterns, fail_patterns, falsePos, falseNeg
 
 
 def run_test_case(
@@ -76,10 +156,8 @@ def run_test_case(
     drc_dir,
     layout_path,
     run_dir,
-    test_type,
     test_table,
     test_rule,
-    thrCount,
     switches="",
 ):
     """
@@ -95,49 +173,87 @@ def run_test_case(
         Path string to the layout of the test pattern we want to test.
     run_dir : stirng or Path object
         Path to the location where is the regression run is done.
-    test_type : string
-        Type of the test case either pass or fail.
-    test_rule : string
-        Rule under test
     switches : string
         String that holds all the DRC run switches required to enable this.
-    
+
     Returns
     -------
     pd.DataFrame
         A pandas DataFrame with the rule and rule deck used.
     """
 
-    pattern_clean = layout_path.with_suffix("").stem
-    output_loc = f"{run_dir}/{test_table}/{test_rule}/{test_type}_patterns"
-    pattern_results = f"{output_loc}/{pattern_clean}_database.lyrdb"
+    # Initial values for counters
+    falsePos_count = 0
+    falseNeg_count = 0
+    pass_patterns_count = 0
+    fail_patterns_count = 0
+
+    # Get switches used for each run
+    sw_file = os.path.join(Path(layout_path.parent.parent).absolute(), f"{test_rule}.{SUPPORTED_SW_EXT}")
+
+    if os.path.exists(sw_file):
+        switches = " ".join(get_switches(sw_file, test_rule))
+    else:
+        switches = "--variant=C"  # default switch
+
+    # Adding switches for specific runsets
+    if "antenna" in runset_file:
+        switches += " --antenna_only"
+    elif "density" in runset_file:
+        switches += " --density_only"
+
+    # Creating run folder structure
+    pattern_clean = ".".join(os.path.basename(layout_path).split(".")[:-1])
+    output_loc = f"{run_dir}/{test_table}_data"
     pattern_log = f"{output_loc}/{pattern_clean}_drc.log"
 
-    if runset_file == "nan" or runset_file is None or runset_file == "":
-        return "cannot_find_rule"
+    # command to run drc
+    call_str = f"python3 {drc_dir}/run_drc.py --path={layout_path} {switches} --table={test_table} --run_dir={output_loc} --run_mode=flat --thr=1 > {pattern_log} 2>&1"
+
+    # Checking if run is already done before
+    if os.path.exists(output_loc) and os.path.isdir(output_loc):
+        pass
+    else:
+        os.makedirs(output_loc, exist_ok=True)
+
+        # Starting klayout run
+        try:
+            check_call(call_str, shell=True)
+        except Exception as e:
+            pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb"))
+            if len(pattern_results) < 1:
+                logging.error("%s generated an exception: %s" % (pattern_clean, e))
+                traceback.print_exc()
+                raise
+
+    # Checking if run is completed or failed
+    pattern_results = glob.glob(os.path.join(output_loc, f"{pattern_clean}*.lyrdb"))
+
+    if len(pattern_results) > 0:
+        # db to gds conversion
+        marker_output, runset_analysis = convert_results_db_to_gds(pattern_results[0])
+
+        # Generating merged testcase for violated rules
+        merged_output = generate_merged_testcase(layout_path, marker_output)
 
-    drc_file_path = os.path.join(drc_dir, runset_file)
+        # Generating final db file
+        if os.path.exists(merged_output):
+            final_report = f'{merged_output.split(".")[0]}_final.lyrdb'
+            call_str = f"klayout -b -r {runset_analysis} -rd input={merged_output} -rd report={final_report}"
+            check_call(call_str, shell=True)
 
-    call_str = f"klayout -b -r {drc_file_path} -rd input={layout_path} -rd report={pattern_results} -rd thr={thrCount} {switches}"
-    os.makedirs(output_loc, exist_ok=True)
-    check_call(call_str, shell=True)
+            if os.path.exists(final_report):
+                pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count = parse_results_db(test_rule, final_report)
 
-    if os.path.isfile(pattern_results):
-        rules_with_violations = parse_results_db(pattern_results)
-        print(rules_with_violations)
-        if test_type == "pass":
-            if test_rule in rules_with_violations:
-                return "false_negative"
+                return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count
             else:
-                return "true_positive"
+
+                return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count
         else:
-            if test_rule in rules_with_violations:
-                return "true_negative"
-            else:
-                return "false_positive"
-    else:
-        return "database_not_found"
+
+            return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count
+    else:
+        return pass_patterns_count, fail_patterns_count, falsePos_count, falseNeg_count
 
 
 def run_all_test_cases(tc_df, run_dir, thrCount):
     """
@@ -151,7 +267,7 @@ def run_all_test_cases(tc_df, run_dir, thrCount):
         Path string to the location of the testing code and output.
     thrCount : int
         Numbe of threads to use per klayout run.
-    
+
     Returns
     -------
     pd.DataFrame
@@ -159,6 +275,7 @@ def run_all_test_cases(tc_df, run_dir, thrCount):
     """
 
     results = []
+
     with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
         future_to_run_id = dict()
         for i, row in tc_df.iterrows():
@@ -169,7 +286,6 @@ def run_all_test_cases(tc_df, run_dir, thrCount):
                 drc_dir,
                 row["test_path"],
                 run_dir,
-                row["test_type"],
                 row["table_name"],
                 row["rule_name"],
                 thrCount,
@@ -179,7 +295,14 @@ def run_all_test_cases(tc_df, run_dir, thrCount):
         for future in concurrent.futures.as_completed(future_to_run_id):
             run_id = future_to_run_id[future]
             try:
-                status_string = future.result()
+                pass_patterns, fail_patterns, false_positive, false_negative = future.result()
+                if pass_patterns + fail_patterns > 0:
+                    if false_positive + false_negative == 0:
+                        status_string = "Passed_rule"
+                    else:
+                        status_string = "Failed_rule"
+                else:
+                    status_string = "Not_tested"
             except Exception as exc:
                 logging.error("%d generated an exception: %s" % (run_id, exc))
                 traceback.print_exc()
@@ -187,6 +310,10 @@ def run_all_test_cases(tc_df, run_dir, thrCount):
 
             info = dict()
             info["run_id"] = run_id
+            info["pass_patterns"] = pass_patterns
+            info["fail_patterns"] = fail_patterns
+            info["false_positive"] = false_positive
+            info["false_negative"] = false_negative
             info["run_status"] = status_string
             results.append(info)
 
@@ -250,43 +377,16 @@ def analyze_test_patterns_coverage(rules_df, tc_df, output_path):
     pd.DataFrame
         A DataFrame with analysis of the rule testing coverage.
""" - cov_rows_df = ( - tc_df[["table_name", "rule_name", "test_type", "test_name"]] - .groupby(["table_name", "rule_name", "test_type"]) + cov_df = ( + tc_df[["table_name", "rule_name"]] + .groupby(["table_name", "rule_name"]) .count() .reset_index(drop=False) - .rename(columns={"test_name": "count"}) - ) - cov_df = cov_rows_df.pivot( - index=["table_name", "rule_name"], columns=["test_type"], values=["count"] - ).reset_index(drop=False) - cov_df.columns = ["_".join(pair) for pair in cov_df.columns] - cov_df.rename( - columns={ - "table_name_": "table_name", - "rule_name_": "rule_name", - "count_fail": "fail_test_patterns_count", - "count_pass": "pass_test_patterns_count", - }, - inplace=True, ) - - cov_df = cov_df[ - [ - "table_name", - "rule_name", - "pass_test_patterns_count", - "fail_test_patterns_count", - ] - ] cov_df = cov_df.merge(rules_df, on="rule_name", how="outer") - cov_df[["pass_test_patterns_count", "fail_test_patterns_count"]] = ( - cov_df[["pass_test_patterns_count", "fail_test_patterns_count"]] - .fillna(0) - .astype(int) - ) cov_df["runset"].fillna("", inplace=True) cov_df.to_csv(os.path.join(output_path, "testcases_coverage.csv"), index=False) + return cov_df @@ -336,8 +436,330 @@ def analyze_regression_run(tc_cv_df, all_tc_df, output_path): return cov_df -def convert_results_db_to_gds(): - pass + +def generate_merged_testcase(orignal_testcase, marker_testcase): + """ + This function will merge orignal gds file with generated + markers gds file. + + Parameters + ---------- + orignal_testcase : string or Path object + Path string to the orignal testcase + + marker_testcase : string or Path + Path of the output marker gds file generated from db file. + + Returns + ------- + merged_gds_path : string or Path + Path of the final merged gds file generated. + """ + + new_lib = gdstk.Library() + + lib_org = gdstk.read_gds(orignal_testcase) + lib_marker = gdstk.read_gds(marker_testcase) + + #Getting flattened top cells + top_cell_org = lib_org.top_level()[0].flatten(apply_repetitions=True) + top_cell_marker = lib_marker.top_level()[0].flatten(apply_repetitions=True) + marker_polygons = top_cell_marker.get_polygons(apply_repetitions=True, include_paths=True, depth=None) + + # Merging all polygons of markers with original testcase + for marker_polygon in marker_polygons: + top_cell_org.add(marker_polygon) + + # Adding flattened merged cell + new_lib.add(top_cell_org.flatten(apply_repetitions=True)) + + # Writing final merged gds file + merged_gds_path = f'{marker_testcase.replace(".gds", "")}_merged.gds' + new_lib.write_gds(merged_gds_path) + + return merged_gds_path + + +def darw_polygons(polygon_data, cell, lay_num, lay_dt, path_width): + """ + This function is used for drawing gds file with all violated polygons. 
+
+    Parameters
+    ----------
+    polygon_data : str
+        Contains data points for each violated polygon
+    cell : gdstk.Cell
+        Top cell that will contain all generated polygons
+    lay_num : int
+        Layer number used to draw violated polygons
+    lay_dt : int
+        Layer data type used to draw violated polygons
+    path_width : float
+        Width used to draw edges
+
+    Returns
+    -------
+    None
+    """
+
+    # Cleaning data points
+    polygon_data = re.sub(r'\s+', '', polygon_data)
+    polygon_data = re.sub(r'[()]', '', polygon_data)
+
+    print("## POLYGON DATA : ", polygon_data)
+    tag_split = polygon_data.split(":")
+    tag = tag_split[0]
+    poly_txt = tag_split[1]
+    polygons = re.split(r"[/|]", poly_txt)
+
+    logging.info(f" Type : {tag}")
+    logging.info(f" All polygons {polygons}")
+
+    # Select shape type to be drawn
+    if tag == "polygon":
+        for poly in polygons:
+            points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")]
+            print(" All points : ", points)
+            cell.add(gdstk.Polygon(points, lay_num, lay_dt))
+
+    elif tag == "edge-pair":
+        for poly in polygons:
+            points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")]
+            print(" All points : ", points)
+            cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt))
+
+    elif tag == "edge":
+        for poly in polygons:
+            points = [(float(p.split(",")[0]), float(p.split(",")[1])) for p in poly.split(";")]
+            print(" All points : ", points)
+            cell.add(gdstk.FlexPath(points, path_width, layer=lay_num, datatype=lay_dt))
+    else:
+        logging.error(f"## Unknown type: {tag} ignored")
+
+
+def convert_results_db_to_gds(results_database: str):
+    """
+    This function will parse the Klayout database for analysis.
+    It converts the lyrdb klayout database file to a GDSII file.
+
+    Parameters
+    ----------
+    results_database : string or Path object
+        Path string to the results file
+
+    Returns
+    -------
+    output_gds_path : string or Path
+        Path of the output marker gds file generated from the db file.
+    output_runset_path : string or Path
+        Path of the output drc runset used for analysis.
+    """
+
+    # layer used as a marker
+    rule_lay_num = 10000
+    # width of edges shapes
+    path_width = 0.01
+
+    pass_marker = "input(2, 222)"
+    fail_marker = "input(3, 222)"
+    fail_marker2 = "input(6, 222)"
+    text_marker = "input(11, 222)"
+
+    # Generating violated rules and its points
+    cell_name = ""
+    lib = None
+    cell = None
+    in_item = False
+    rule_data_type_map = list()
+    analysis_rules = []
+
+    for ev, elem in tqdm(ET.iterparse(results_database, events=('start', 'end'))):
+
+        if elem.tag != "item" and not in_item:
+            elem.clear()
+            continue
+
+        if elem.tag != "item" and in_item:
+            continue
+
+        if elem.tag == "item" and ev == "start":
+            in_item = True
+            continue
+
+        rules = elem.findall("category")
+        values = elem.findall("values")
+
+        if len(values) > 0:
+            polygons = values[0].findall("value")
+        else:
+            polygons = []
+
+        if cell_name == "":
+            all_cells = elem.findall("cell")
+
+            if len(all_cells) > 0:
+                cell_name = all_cells[0].text
+
+                if cell_name is None:
+                    elem.clear()
+                    continue
+
+                lib = gdstk.Library(f"{cell_name}_markers")
+                cell = lib.new_cell(f"{cell_name}_markers")
+
+        if len(rules) > 0:
+            rule_name = rules[0].text.replace("'", "")
+            if rule_name is None:
+                elem.clear()
+                continue
+
+        else:
+            elem.clear()
+            continue
+
+        if rule_name not in rule_data_type_map:
+            rule_data_type_map.append(rule_name)
+
+        ## Drawing polygons here.
+        rule_lay_dt = rule_data_type_map.index(rule_name) + 1
+        if cell is not None:
+            for p in polygons:
+                draw_polygons(p.text, cell, rule_lay_num, rule_lay_dt, path_width)
+                break
+
+        ## Clearing memory
+        in_item = False
+        elem.clear()
+
+    # Writing final marker gds file
+    output_gds_path = f'{results_database.replace(".lyrdb", "")}_markers.gds'
+    lib.write_gds(output_gds_path)
+
+    # Writing analysis rule deck
+    output_runset_path = f'{results_database.replace(".lyrdb", "")}_analysis.drc'
+
+    runset_analysis_setup = f'''
+    source($input)
+    report("DRC analysis run report at", $report)
+    pass_marker = {pass_marker}
+    fail_marker = {fail_marker}
+    fail_marker2 = {fail_marker2}
+    text_marker = {text_marker}
+    '''
+
+    pass_patterns_rule = f'''
+    pass_marker.interacting( text_marker.texts("{rule_name}") ).output("{rule_name}_pass_patterns", "{rule_name}_pass_patterns polygons")
+    '''
+    fail_patterns_rule = f'''
+    fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")) ).or( fail_marker.interacting(text_marker.texts("{rule_name}")).not_interacting(fail_marker2) ).output("{rule_name}_fail_patterns", "{rule_name}_fail_patterns polygons")
+    '''
+    false_pos_rule = f'''
+    pass_marker.interacting(text_marker.texts("{rule_name}")).interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_positive", "{rule_name}_false_positive occurred")
+    '''
+    false_neg_rule = f'''
+    ((fail_marker2.interacting(fail_marker.interacting(text_marker.texts("{rule_name}")))).or((fail_marker.interacting(input(11, 222).texts("{rule_name}")).not_interacting(fail_marker2)))).not_interacting(input({rule_lay_num}, {rule_lay_dt})).output("{rule_name}_false_negative", "{rule_name}_false_negative occurred")
+    '''
+
+    # Adding list of analysis rules
+    if not any(rule_name in rule_txt for rule_txt in analysis_rules):
+        analysis_rules.append(pass_patterns_rule)
+        analysis_rules.append(fail_patterns_rule)
+        analysis_rules.append(false_pos_rule)
+        analysis_rules.append(false_neg_rule)
+
+    with open(output_runset_path, "a+") as runset_analysis:
+        # analysis_rules = list(dict.fromkeys(analysis_rules))
+        runset_analysis.write(runset_analysis_setup)
+        runset_analysis.write("".join(analysis_rules))
+
+    return output_gds_path, output_runset_path
+
+
+def get_unit_tests_dataframe(gds_files):
+    """
+    This function is used for getting all test cases available in a formatted DataFrame before running.
+
+    Parameters
+    ----------
+    gds_files : str
+        Path string to the location of unit test cases path.
+
+    Returns
+    -------
+    pd.DataFrame
+        A DataFrame that has all the targeted test cases that we need to run.
+ """ + + # Get rules from gds + rules = [] + test_paths = [] + # layer num of rule text + lay_num = 11 + # layer data type of rule text + lay_dt = 222 + + # Getting all rules names from testcases + for gds_file in gds_files: + library = gdstk.read_gds(gds_file) + top_cells = library.top_level() #Get top cells + for cell in top_cells: + flatten_cell = cell.flatten() + # Get all text labels for each cell + labels = flatten_cell.get_labels(apply_repetitions=True, depth=None, layer=lay_num, texttype=lay_dt) + # Get label value + for label in labels: + rule = label.text + if rule not in rules: + rules.append(rule) + test_paths.append(gds_file) + + tc_df = pd.DataFrame({"test_path":test_paths, "rule_name": rules}) + tc_df["table_name"] = tc_df["test_path"].apply( + lambda x: x.name.replace(".gds", "") + ) + return tc_df + + +def build_unit_tests_dataframe(unit_test_cases_dir, target_table, target_rule): + """ + This function is used for getting all test cases available in a formated data frame before running. + + Parameters + ---------- + unit_test_cases_dir : str + Path string to the location of unit test cases path. + target_table : str or None + Name of table that we want to run regression for. If None, run all found. + target_rule : str or None + Name of rule that we want to run regression for. If None, run all found. + + Returns + ------- + pd.DataFrame + A DataFrame that has all the targetted test cases that we need to run. + """ + all_unit_test_cases = sorted( + Path(unit_test_cases_dir).rglob("*.{}".format(SUPPORTED_TC_EXT)) + ) + logging.info( + "## Total number of test cases found: {}".format(len(all_unit_test_cases)) + ) + + # Get test cases df from test cases + tc_df = get_unit_tests_dataframe(all_unit_test_cases) + + ## Filter test cases based on filter provided + if target_rule is not None: + tc_df = tc_df[tc_df["rule_name"] == target_rule] + + if target_table is not None: + tc_df = tc_df[tc_df["table_name"] == target_table] + + if len(tc_df) < 1: + logging.error("No test cases remaining after filtering.") + exit(1) + + return tc_df + def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): """ @@ -362,35 +784,43 @@ def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count): bool If all regression passed, it returns true. If any of the rules failed it returns false. """ - + ## Parse Existing Rules rules_df = parse_existing_rules(drc_dir, output_path) - logging.info("## Total number of rules found: {}".format(len(rules_df))) + logging.info("## Total number of rules found in rule decks: {}".format(len(rules_df))) print(rules_df) - test_cases_path = os.path.join(drc_dir, "testing") + ## Get all test cases available in the repo. + test_cases_path = os.path.join(drc_dir, "testing/testcases") + unit_test_cases_path = os.path.join(test_cases_path, "unit_testcases") + tc_df = build_unit_tests_dataframe(unit_test_cases_path, target_table, target_rule) + logging.info("## Total number of rules found in test cases: {}".format(len(tc_df))) - ## TODO: Completing DRC regression - - exit () ## Get tc_df with the correct rule deck per rule. 
     tc_df = tc_df.merge(rules_df, how="left", on="rule_name")
-    tc_df["run_id"] = list(range(len(tc_df)))
-
+    tc_df["run_id"] = tc_df.groupby(['test_path']).ngroup()
+    tc_df.drop_duplicates(inplace=True)
     print(tc_df)
+    tc_df.to_csv(os.path.join(output_path, "all_test_cases.csv"), index=False)
 
     ## Do some test cases coverage analysis
     cov_df = analyze_test_patterns_coverage(rules_df, tc_df, output_path)
+    cov_df.drop_duplicates(inplace=True)
     print(cov_df)
-    
+
     ## Run all test cases
     all_tc_df = run_all_test_cases(tc_df, output_path, cpu_count)
+    all_tc_df.drop_duplicates(inplace=True)
     print(all_tc_df)
     all_tc_df.to_csv(
         os.path.join(output_path, "all_test_cases_results.csv"), index=False
     )
 
+    # Generating merged testcase for violated rules
+
+    exit ()
+
     ## Analyze regression run and generate a report
     regr_df = analyze_regression_run(cov_df, all_tc_df, output_path)
     print(regr_df)
@@ -409,6 +839,7 @@ def run_regression(drc_dir, output_path, target_table, target_rule, cpu_count):
         logging.info("## All testcases passed.")
         return True
 
+
 def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, target_rule: str):
     """
     Main Procedure.
@@ -434,8 +865,8 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ
     """
 
     # No. of threads
-    cpu_count = os.cpu_count() if args["--mp"] == None else int(args["--mp"])
-    
+    cpu_count = os.cpu_count() if args["--mp"] is None else int(args["--mp"])
+
     # Pandas printing setup
     pd.set_option("display.max_columns", None)
     pd.set_option("display.max_rows", None)
@@ -450,6 +881,9 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ
     # Start of execution time
     t0 = time.time()
 
+    ## Check Klayout version
+    check_klayout_version()
+
    # Calling regression function
     run_status = run_regression(
         drc_dir, output_path, target_table, target_rule, cpu_count
@@ -469,11 +903,12 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ
 # -------------------------- MAIN --------------------------------
 # ================================================================
 
+
 if __name__ == "__main__":
 
     # docopt reader
     args = docopt(__doc__, version="DRC Regression: 0.2")
-    
+
     # arguments
     run_name = args["--run_name"]
     target_table = args["--table_name"]
@@ -496,14 +931,14 @@ def main(drc_dir: str, rules_dir: str, output_path: str, target_table: str, targ
     logging.basicConfig(
         level=logging.DEBUG,
         handlers=[
-            logging.FileHandler(os.path.join(output_path, "{}.log".format(run_name))),
-            logging.StreamHandler()
+            logging.FileHandler(os.path.join(output_path, "{}.log".format(run_name))),
+            logging.StreamHandler()
         ],
-        format=f"%(asctime)s | %(levelname)-7s | %(message)s",
+        format="%(asctime)s | %(levelname)-7s | %(message)s",
         datefmt="%d-%b-%Y %H:%M:%S",
     )
-    
+
     # Calling main function
     run_status = main(
         drc_dir, rules_dir, output_path, target_table, target_rule
-    )
\ No newline at end of file
+    )
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/ANT.gds b/rules/klayout/drc/testing/testcases/unit_testcases/antenna.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/ANT.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/antenna.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/DNWELL.gds b/rules/klayout/drc/testing/testcases/unit_testcases/dnwell.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/DNWELL.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/dnwell.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/Dualgate.gds b/rules/klayout/drc/testing/testcases/unit_testcases/dualgate.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/Dualgate.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/dualgate.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/EFUSE.gds b/rules/klayout/drc/testing/testcases/unit_testcases/efuse.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/EFUSE.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/efuse.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/ESD.gds b/rules/klayout/drc/testing/testcases/unit_testcases/esd.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/ESD.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/esd.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/HRES.gds b/rules/klayout/drc/testing/testcases/unit_testcases/hres.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/HRES.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/hres.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/MDN.gds b/rules/klayout/drc/testing/testcases/unit_testcases/ldnmos.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/MDN.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/ldnmos.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/MDP.gds b/rules/klayout/drc/testing/testcases/unit_testcases/ldpmos.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/MDP.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/ldpmos.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/LRES.gds b/rules/klayout/drc/testing/testcases/unit_testcases/lres.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/LRES.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/lres.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/LVPWELL.gds b/rules/klayout/drc/testing/testcases/unit_testcases/lvpwell.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/LVPWELL.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/lvpwell.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/Mcell.gds b/rules/klayout/drc/testing/testcases/unit_testcases/mcell.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/Mcell.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/mcell.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/NAT.gds b/rules/klayout/drc/testing/testcases/unit_testcases/nat.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/NAT.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/nat.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/Nplus.gds b/rules/klayout/drc/testing/testcases/unit_testcases/nplus.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/Nplus.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/nplus.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/Pplus.gds b/rules/klayout/drc/testing/testcases/unit_testcases/pplus.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/Pplus.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/pplus.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/PRES.gds b/rules/klayout/drc/testing/testcases/unit_testcases/pres.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/PRES.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/pres.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/SB.gds b/rules/klayout/drc/testing/testcases/unit_testcases/sab.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/SB.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/sab.gds
diff --git a/rules/klayout/drc/testing/testcases/unit_testcases/VIA.gds b/rules/klayout/drc/testing/testcases/unit_testcases/via.gds
similarity index 100%
rename from rules/klayout/drc/testing/testcases/unit_testcases/VIA.gds
rename to rules/klayout/drc/testing/testcases/unit_testcases/via.gds
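
Usage sketch (not part of the patch): the new test-DRC-% pattern rule simply forwards the matched stem to run_regression.py, so any rule table with unit testcases can be run on its own. The table name "lvpwell" below is an assumption, derived from the renamed unit testcase lvpwell.gds.

    # Run the regression for a single table; the table name is assumed to
    # match a unit-testcase file name (e.g. lvpwell.gds -> "lvpwell"):
    make test-DRC-lvpwell

    # Run the full regression over all discovered tables:
    make test-DRC-main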