├── .bazelrc ├── .gitattributes ├── .github ├── docs │ └── CODEOWNERS └── workflows │ ├── bazel_tests.yml │ └── yapf_check.yml ├── .gitignore ├── BUILD ├── LICENSE ├── README.md ├── WORKSPACE ├── bin ├── BUILD ├── check_test.py ├── lint_parser_ascent.py ├── lint_parser_hal.py └── simmer.py ├── deps.bzl ├── docs ├── BUILD └── defs.md ├── env ├── .style.yapf └── azure_pipeline.yaml ├── examples ├── apb │ ├── BUILD │ ├── apb.sv │ ├── apb_pkg.sv │ └── unit_test_top.sv └── dpi │ ├── BUILD │ ├── dpi.c │ ├── dpi.h │ ├── dpi_pkg.sv │ ├── dpi_test.c │ └── unit_test_top.sv ├── lib ├── BUILD ├── __init__.py ├── cmn_logging.py ├── job_lib.py ├── parser_actions.py ├── regression.py └── rv_utils.py ├── requirements.txt ├── simulator.bzl ├── tests ├── BUILD └── doc_test.sh ├── vendors ├── cadence │ ├── BUILD │ ├── verilog_dv_default_sim_opts.f │ ├── verilog_dv_tb_compile_args.f.template │ ├── verilog_dv_tb_compile_args_pldm_ice.f.template │ ├── verilog_dv_unit_test.sh.template │ ├── verilog_rtl_cdc_epilogue_cmds.tcl.template │ ├── verilog_rtl_cdc_preamble_cmds.tcl.template │ ├── verilog_rtl_cdc_test.sh.template │ ├── verilog_rtl_lint_cmds.tcl.template │ ├── verilog_rtl_lint_test.sh.template │ ├── verilog_rtl_unit_test.sh.template │ ├── verilog_rtl_unit_test_svunit.sh.template │ ├── verilog_rtl_unit_test_svunit_waves.tcl.template │ └── verilog_rtl_unit_test_waves.tcl.template ├── common │ ├── BUILD │ └── verilog_dv_tb_runtime_args.f.template ├── real_intent │ ├── BUILD │ ├── verilog_rtl_lint_cmds.tcl.template │ └── verilog_rtl_lint_test.sh.template └── synopsys │ ├── BUILD │ ├── verilog_dv_default_sim_opts.f │ └── verilog_dv_tb_compile_args.f.template └── verilog ├── BUILD ├── defs.bzl └── private ├── BUILD ├── dv.bzl ├── rtl.bzl └── verilog.bzl /.bazelrc: -------------------------------------------------------------------------------- 1 | # Cadence Xcelium requires $HOME to be set 2 | test --action_env=HOME 3 | 4 | # Lightelligence Specific 5 | build --@rules_verilog//:verilog_dv_unit_test_command="runmod -t xrun --" 6 | build --@rules_verilog//:verilog_rtl_cdc_test_command="runmod -t jg --" 7 | build --@rules_verilog//:verilog_rtl_unit_test_command="runmod -t xrun --" 8 | build --@rules_verilog//:verilog_rtl_lint_test_command="runmod -t xrun --" 9 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Force GitHub to recognize BUILD and *.bzl files with the Starlark language 2 | *.bzl linguist-language=Starlark 3 | BUILD linguist-language=Starlark 4 | -------------------------------------------------------------------------------- /.github/docs/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @jmlemay99 @justin371 2 | -------------------------------------------------------------------------------- /.github/workflows/bazel_tests.yml: -------------------------------------------------------------------------------- 1 | name: Run Bazel Tests 2 | on: [push] 3 | jobs: 4 | bazel-tests: 5 | name: Bazel Tests 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: checkout 9 | uses: actions/checkout@main 10 | 11 | - name: install-bazelisk 12 | uses: vsco/bazelisk-action@master 13 | with: 14 | version: '1.7.5' 15 | bazel-install-path: './.local/bin' 16 | os: 'linux' 17 | 18 | - name: run-buildifier-diff 19 | run: bazel run //tests:buildifier_format_diff 20 | --------------------------------------------------------------------------------
/.github/workflows/yapf_check.yml: -------------------------------------------------------------------------------- 1 | name: YAPF Formatting Check 2 | on: [push] 3 | jobs: 4 | formatting-check: 5 | name: Formatting Check 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@main 9 | - name: Run YAPF python style checks 10 | uses: AlexanderMelde/yapf-action@master 11 | with: 12 | args: --diff --recursive --style env/.style.yapf 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | bazel-* 3 | __pycache__ -------------------------------------------------------------------------------- /BUILD: -------------------------------------------------------------------------------- 1 | load("@rules_verilog//verilog:defs.bzl", "verilog_tool_encapsulation") 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | verilog_tool_encapsulation( 6 | name = "verilog_dv_unit_test_command", 7 | build_setting_default = "xrun", 8 | ) 9 | 10 | verilog_tool_encapsulation( 11 | name = "verilog_rtl_cdc_test_command", 12 | build_setting_default = "jg", 13 | ) 14 | 15 | verilog_tool_encapsulation( 16 | name = "verilog_rtl_lint_test_command", 17 | build_setting_default = "xrun", 18 | ) 19 | 20 | verilog_tool_encapsulation( 21 | name = "verilog_rtl_unit_test_command", 22 | build_setting_default = "xrun", 23 | ) 24 | 25 | verilog_tool_encapsulation( 26 | name = "verilog_rtl_svunit_test_command", 27 | build_setting_default = "xrun", 28 | ) 29 | 30 | verilog_tool_encapsulation( 31 | name = "verilog_rtl_wave_viewer_command", 32 | build_setting_default = "simvision", 33 | ) 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2021 Lightelligence, Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Verilog rules for Bazel 2 | 3 | ## Setup 4 | 5 | Add the following to your `WORKSPACE` file: 6 | 7 | ```skylark 8 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 9 | http_archive( 10 | name = "rules_verilog", 11 | urls = ["https://github.com/Lightelligence/rules_verilog/archive/v0.0.0.tar.gz"], 12 | sha256 = "ab64a872410d22accb383c7ffc6d42e90f4de40a7cd92f43f4c26471c4f14908", 13 | strip_prefix = "rules_verilog-0.0.0", 14 | ) 15 | load("@rules_verilog//:deps.bzl", "verilog_dependencies") 16 | verilog_dependencies() 17 | ``` 18 | **Note**: Update commit and sha256 as needed. 
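The simulator and lint tools invoked by the test rules are not hardcoded: the root `BUILD` file exposes them as `verilog_tool_encapsulation` string settings, so a project can point them at site-specific wrappers from its `.bazelrc`. As a concrete example (taken verbatim from this repository's own `.bazelrc`, which routes the tools through Lightelligence's `runmod` wrapper):

```
build --@rules_verilog//:verilog_rtl_unit_test_command="runmod -t xrun --"
build --@rules_verilog//:verilog_rtl_lint_test_command="runmod -t xrun --"
```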
19 | 20 | 21 | Cadence Xcelium needs both the HOME and LM_LICENSE_FILE environment variables; add them to your `.bazelrc` file: 22 | 23 | ``` 24 | test --action_env=HOME 25 | test --action_env=LM_LICENSE_FILE 26 | ``` 27 | 28 | ### Python Dependencies 29 | rules_verilog also depends on several Python libraries. These are defined in requirements.txt and may be installed with the package manager of your choice. The recommended flow is to install them via the pip_install rule in your `WORKSPACE` file: 30 | 31 | ```skylark 32 | load("@rules_python//python:pip.bzl", "pip_install") 33 | 34 | pip_install( 35 | name = "pip_deps", 36 | requirements = "@rules_verilog//:requirements.txt", 37 | ) 38 | ``` 39 | 40 | ## Rules 41 | 42 | ### RTL 43 | Load rules into your `BUILD` files from [@rules_verilog//verilog:defs.bzl](verilog/defs.bzl) 44 | 45 | - [verilog_rtl_library](docs/defs.md#verilog_rtl_library) 46 | - [verilog_rtl_pkg](docs/defs.md#verilog_rtl_pkg) 47 | - [verilog_rtl_shell](docs/defs.md#verilog_rtl_shell) 48 | - [verilog_rtl_unit_test](docs/defs.md#verilog_rtl_unit_test) 49 | - [verilog_rtl_lint_test](docs/defs.md#verilog_rtl_lint_test) 50 | - [verilog_rtl_cdc_test](docs/defs.md#verilog_rtl_cdc_test) 51 | 52 | 53 | ### DV 54 | Load rules into your `BUILD` files from [@rules_verilog//verilog:defs.bzl](verilog/defs.bzl) 55 | 56 | - [verilog_dv_library](docs/defs.md#verilog_dv_library) 57 | - [verilog_dv_unit_test](docs/defs.md#verilog_dv_unit_test) 58 | - [verilog_dv_tb](docs/defs.md#verilog_dv_tb) 59 | - [verilog_dv_test_cfg](docs/defs.md#verilog_dv_test_cfg) 60 | 61 | 62 | ### Generic Verilog 63 | Load rules into your `BUILD` files from [@rules_verilog//verilog:defs.bzl](verilog/defs.bzl) 64 | 65 | - [verilog_test](docs/defs.md#verilog_test) 66 | 67 | ## Caveats 68 | - The SVUnit package always adds svunit_pkg.sv to the compiler command line after the user flists. Without compiler library discovery, user flists cannot include/import anything that depends on svunit_pkg. 69 | - To work around this ordering dependency, the project Bazel rules must create the verilog_rtl_lib using the module files as headers, and use a dummy .sv file as the top module (see the sketch at the end of this README). 70 | - By declaring the module files as headers, they are not placed on the compiler command line via flists; rather, their parent directory appears as an incdir. 71 | - This allows SVUnit's generated flist to appear last on the compiler command line, without violating any compiler ordering dependencies. 72 | 73 | ### Vendor Support 74 | These rules were written with the Cadence and Synopsys tools as the underlying compiler and simulator. Abstraction leaks are prevalent throughout the rules. 75 | 76 | ### UVM Testbenches 77 | While rules for unit tests exist, the [verilog_dv_tb](docs/defs.md#verilog_dv_tb) and [verilog_dv_test_cfg](docs/defs.md#verilog_dv_test_cfg) rules are intended to work in conjunction with an external script capable of spawning many parallel simulations. Documentation throughout this codebase refers to a tool called `simmer`, which may be released in a future version.
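As a sketch of the SVUnit workaround described in the Caveats above: the unit-test modules are declared as headers (so they reach the compiler only through an incdir rather than the flist), and a dummy `.sv` file serves as the top module. The target and file names here are hypothetical, and the exact attribute names should be confirmed against [docs/defs.md](docs/defs.md):

```skylark
load("@rules_verilog//verilog:defs.bzl", "verilog_rtl_library")

verilog_rtl_library(
    name = "foo_unit_tests",
    # Declaring the *_unit_test.sv modules as headers keeps them off the
    # flist; their directory is passed as an incdir instead, so SVUnit's
    # generated flist can safely come last on the compiler command line.
    headers = glob(["*_unit_test.sv"]),
    modules = ["dummy_top.sv"],  # dummy top module to anchor the library
    deps = [":foo"],
)
```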
78 | -------------------------------------------------------------------------------- /WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "rules_verilog") 2 | 3 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 4 | load("@rules_verilog//:deps.bzl", "verilog_dependencies") 5 | 6 | verilog_dependencies() 7 | 8 | load("@rules_python//python:pip.bzl", "pip_install") 9 | 10 | pip_install( 11 | name = "pip_deps", 12 | requirements = "@rules_verilog//:requirements.txt", 13 | ) 14 | 15 | # buildifier is written in Go and hence needs rules_go to be built. 16 | # See https://github.com/bazelbuild/rules_go for the up to date setup instructions. 17 | http_archive( 18 | name = "io_bazel_rules_go", 19 | sha256 = "d1ffd055969c8f8d431e2d439813e42326961d0942bdf734d2c95dc30c369566", 20 | urls = [ 21 | "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.5/rules_go-v0.24.5.tar.gz", 22 | "https://github.com/bazelbuild/rules_go/releases/download/v0.24.5/rules_go-v0.24.5.tar.gz", 23 | ], 24 | ) 25 | 26 | load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") 27 | 28 | go_rules_dependencies() 29 | 30 | go_register_toolchains() 31 | 32 | http_archive( 33 | name = "bazel_skylib", 34 | sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", 35 | urls = [ 36 | "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", 37 | "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz", 38 | ], 39 | ) 40 | 41 | load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") 42 | 43 | bazel_skylib_workspace() 44 | 45 | http_archive( 46 | name = "bazel_gazelle", 47 | sha256 = "b85f48fa105c4403326e9525ad2b2cc437babaa6e15a3fc0b1dbab0ab064bc7c", 48 | urls = [ 49 | "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.2/bazel-gazelle-v0.22.2.tar.gz", 50 | "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.2/bazel-gazelle-v0.22.2.tar.gz", 51 | ], 52 | ) 53 | 54 | load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") 55 | 56 | gazelle_dependencies() 57 | 58 | http_archive( 59 | name = "io_bazel_stardoc", 60 | sha256 = "aa814dae0ac400bbab2e8881f9915c6f47c49664bf087c409a15f90438d2c23e", 61 | urls = [ 62 | "https://mirror.bazel.build/github.com/bazelbuild/stardoc/releases/download/0.5.1/stardoc-0.5.1.tar.gz", 63 | "https://github.com/bazelbuild/stardoc/releases/download/0.5.1/stardoc-0.5.1.tar.gz", 64 | ], 65 | ) 66 | 67 | load("@io_bazel_stardoc//:setup.bzl", "stardoc_repositories") 68 | 69 | stardoc_repositories() 70 | 71 | http_archive( 72 | name = "com_google_protobuf", 73 | sha256 = "b07772d38ab07e55eca4d50f4b53da2d998bb221575c60a4f81100242d4b4889", 74 | strip_prefix = "protobuf-3.20.0", 75 | url = "https://github.com/protocolbuffers/protobuf/archive/refs/tags/v3.20.0.tar.gz", 76 | ) 77 | 78 | load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") 79 | 80 | protobuf_deps() 81 | 82 | http_archive( 83 | name = "com_github_bazelbuild_buildtools", 84 | sha256 = "b8fc5ee8f48a0f7ff0a72f8457aaefb5807777162caf6967c5648f73ae320cf3", 85 | strip_prefix = "buildtools-master-5.5.1", 86 | url = "https://github.com/bazelbuild/bazel/archive/refs/tags/5.1.1.tar.gz", 87 | ) 88 | -------------------------------------------------------------------------------- /bin/BUILD: 
-------------------------------------------------------------------------------- 1 | load("@rules_python//python:defs.bzl", "py_binary") 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_binary( 6 | name = "lint_parser_hal", 7 | srcs = ["lint_parser_hal.py"], 8 | deps = ["//lib:cmn_logging"], 9 | ) 10 | 11 | py_binary( 12 | name = "lint_parser_ascent", 13 | srcs = ["lint_parser_ascent.py"], 14 | deps = ["//lib:cmn_logging"], 15 | ) 16 | 17 | py_binary( 18 | name = "simmer", 19 | srcs = ["simmer.py"], 20 | deps = [ 21 | "//bin:check_test", 22 | "//lib:cmn_logging", 23 | "//lib:job_lib", 24 | "//lib:parser_actions", 25 | "//lib:regression", 26 | "//lib:rv_utils", 27 | ], 28 | ) 29 | 30 | py_binary( 31 | name = "check_test", 32 | srcs = ["check_test.py"], 33 | deps = ["//lib:cmn_logging"], 34 | ) 35 | -------------------------------------------------------------------------------- /bin/check_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Scan test that confirms the simulation log file has no errors 4 | # 5 | # Errors could include failures from the bench, assertions, not seeing enough activity, etc. 6 | # 7 | 8 | import argparse 9 | import os 10 | import platform 11 | import re 12 | import sys 13 | import subprocess 14 | 15 | # Error signatures from log files 16 | error_signature = [ 17 | r'%E-', 18 | r'%F-', 19 | r'%W-', 20 | r'#E', 21 | r"\*ERROR\*", 22 | r"\*FAILED\*", 23 | r"SVA_CHECKER_ERROR", 24 | r"Assertion FAILURE", 25 | r"Solver failed", 26 | r"VIRL_MEM_ERR", 27 | r"Warning-.FCIBR", 28 | r"Warning-.FCPSBU", 29 | r"Warning-.STASKW_CO", 30 | r"Warning-.SVART-NAFRLTS", 31 | r"Warning-.FCIELIE", 32 | r"Warning:.*AxiPC.sv", 33 | r"Error!!", 34 | r"Error:", 35 | r"ERROR..FAILURE", 36 | r"FATAL..FAILURE", 37 | r"Error-", 38 | r"UVM_ERROR [@/]", 39 | r"UVM_FATAL [@/]", 40 | r"WARNING.FAILURE", 41 | r" \*E,", 42 | r" \*F,", 43 | r"VIRL_MEM_WARNING", 44 | r": Assertion .* failed\.", 45 | r"UVM_WARNING .*uvm_reg_map.*RegModel.*In map .*overlaps with address of existing register", 46 | r"UVM_WARNING .*uvm_reg_map.*RegModel.*In map .*overlaps with address range of memory", 47 | r"UVM_WARNING .*uvm_reg_map.*RegModel.*In map .*overlaps existing memory with range", 48 | r"UVM_WARNING .*uvm_reg_map.*RegModel.*In map .*maps to same address as register", 49 | r"UVM_WARNING .*uvm_reg_map.*RegModel.*In map .*maps to same address as memory", 50 | r"\*W,RMEMNOF", 51 | r"\*W,ASRTST .*has failed", 52 | ] 53 | 54 | # Signatures indicating a successful test completion 55 | finish_signature = [ 56 | "#I Final Report", "finish at simulation time", "Simulation complete via", "--- UVM Report Summary ---" 57 | ] 58 | 59 | # Compile the regular expressions to be used in a search 60 | err_regex = None 61 | 62 | 63 | def gen_err_regex(): 64 | global err_regex 65 | err_regex = re.compile("(" + ")|(".join(error_signature) + ")") 66 | 67 | 68 | finish_regex = re.compile("(" + ")|(".join(finish_signature) + ")") 69 | 70 | enable_regex = re.compile(".*TEST_CHECK_ENABLE: (.*)") 71 | disable_regex = re.compile(".*TEST_CHECK_DISABLE: (.*)") 72 | 73 | 74 | def parse_args(argv): 75 | parser = argparse.ArgumentParser(description="Check a simulation logfile for errors.", 76 | formatter_class=argparse.RawTextHelpFormatter) 77 | parser.add_argument("logfile", help="Logfile to parse") 78 | parser.add_argument("--file-size-limit", 79 | type=int, default=0, 80 | help='Maximum logfile size (MB); if running at UVM_NONE, this should be 5MB') 81 | parser.add_argument("--error-limit", type=int, default=25, help='Stop parsing logfile at this number of errors') 82 | options = parser.parse_args(argv) 83 | return options 84 | 85 | 86 | def enable_disable_checks(line): 87 | enable_match = enable_regex.match(line) 88 | if enable_match: 89 | # Add check to err_regex, recompile 90 | new_regex = enable_match.group(1) 91 | if new_regex not in error_signature: 92 | error_signature.append(new_regex) 93 | gen_err_regex() 94 | return True 95 | disable_match = disable_regex.match(line) 96 | if disable_match: 97 | # Remove check from err_regex, recompile 98 | remove_regex = disable_match.group(1) 99 | if remove_regex in error_signature: 100 | error_signature.remove(remove_regex) 101 | gen_err_regex() 102 | return True 103 | return False 104 | 105 | 106 | def main(options): 107 | gen_err_regex() 108 | error_lines = [] 109 | found_finish_line = False 110 | output_file = os.path.basename(options.logfile) 111 | seed_lines = [] 112 | run_time_lines = [] 113 | uvm_verbosity = None 114 | # --file-size-limit defaults to 0, which disables the max size check below 115 | max_size = options.file_size_limit 116 | 117 | 118 | 119 | # search lines of the log file for "seed", error, or finish signatures 120 | 121 | with open(options.logfile, 'r', encoding='utf-8', errors='ignore') as in_file: 122 | try: 123 | for line_no, line in enumerate(in_file): 124 | # Before the actual checking, we need to see if a disable or enable statement is used 125 | if enable_disable_checks(line): 126 | pass # Side effect of this function is to add or remove the desired regex in err_regex 127 | elif err_regex.search(line): 128 | error_lines.append(line) 129 | if len(error_lines) >= options.error_limit: 130 | break 131 | elif "random seed used" in line or "SVSEED" in line: 132 | # This is going to be a really inefficient lookup eventually 133 | seed_lines.append(line) 134 | elif not found_finish_line and finish_regex.search(line): 135 | found_finish_line = True 136 | elif "real\t" in line: 137 | run_time_lines.append(line) 138 | except UnicodeDecodeError: 139 | print("UnicodeDecodeError on line ", line_no + 1) 140 | 141 | 142 | if len(error_lines) > 0: 143 | # We found some errors 144 | print("Error found in ", options.logfile) 145 | with open(output_file + ".err", 'w') as err_log: 146 | for line in seed_lines: 147 | err_log.write(line) 148 | for line in run_time_lines: 149 | err_log.write(line) 150 | for line in error_lines: 151 | err_log.write(line) 152 | err_log.write('%s\n' % platform.node()) 153 | # Clean up log file by removing path to filenames 154 | #fn_regex = re.compile('(/.*/)(.*\.sv.?)\(') 155 | #with open(options.logfile, 'r', encoding='utf-8', errors='ignore') as in_file: 156 | #with open(output_file+".log",'w') as out_file: 157 | #for line in in_file: 158 | #out_file.write(fn_regex.sub('\g<2>(', line)) 159 | sys.exit(1) 160 | elif not found_finish_line: 161 | # No finish was found 162 | with open(output_file + ".err", 'w') as err_log: 163 | for line in seed_lines: 164 | err_log.write(line) 165 | for line in run_time_lines: 166 | err_log.write(line) 167 | err_log.write('******Did not find finish!!!\n\n') 168 | err_log.write('%s\n' % platform.node()) 169 | tail_lines = subprocess.Popen(['tail', '-25', options.logfile], 170 | stdout=subprocess.PIPE, 171 | universal_newlines=True).stdout.readlines() 172 | # print subprocess.check_output(['tail', '-10', options.logfile]) 173 | for line in tail_lines: 174 | err_log.write(line) 175 | sys.exit(1) 176 | elif max_size > 0 and
(os.path.getsize(options.logfile) > max_size * 2**20): 177 | with open(output_file + ".err", 'w') as err_log: 178 | err_log.write("#E log file size %d exceeds max_size %d" % (os.path.getsize(options.logfile), max_size)) 179 | sys.exit(1) 180 | else: 181 | # Test run passed 182 | with open(output_file + ".pass", 'w') as pass_log: 183 | pass_log.write('%s\n' % platform.node()) 184 | pass_log.write("No Err found\n") 185 | for line in seed_lines: 186 | pass_log.write(line) 187 | for line in run_time_lines: 188 | pass_log.write(line) 189 | sys.exit(0) 190 | 191 | 192 | if __name__ == '__main__': 193 | options = parse_args(sys.argv[1:]) 194 | main(options) 195 | -------------------------------------------------------------------------------- /bin/lint_parser_ascent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Parses results from Real Intent Ascent Lint""" 3 | 4 | ################################################################################ 5 | # stdlib 6 | import argparse 7 | import os 8 | import re 9 | import subprocess 10 | import sys 11 | 12 | ################################################################################ 13 | # Checkout specific libraries 14 | import cmn_logging 15 | 16 | ################################################################################ 17 | # Constants 18 | LINE_WAIVER_REGEXP = re.compile("\S\s// lint: disable=(.*)") 19 | BLOCK_WAIVER_START_REGEXP = re.compile("\s*// lint: disable=(.*)") 20 | BLOCK_WAIVER_END_REGEXP = re.compile("\s*// lint: enable=(.*)") 21 | 22 | ################################################################################ 23 | # Helpers 24 | 25 | 26 | def parse_args(argv): 27 | parser = argparse.ArgumentParser(description="Parse output report from Ascent Lint using inline RTL waivers", 28 | formatter_class=argparse.RawTextHelpFormatter) 29 | 30 | parser.add_argument('--tool-debug', 31 | default=False, 32 | action='store_true', 33 | help='Set the verbosity of this tool to debug level.') 34 | parser.add_argument("--sw", 35 | dest="show_waived", 36 | default=False, 37 | action='store_true', 38 | help='Show previously waived messages.') 39 | parser.add_argument("--sh", 40 | dest="show_help", 41 | action='store_true', 42 | help="Display the help message from Ascent for each individual issue") 43 | options = parser.parse_args(argv) 44 | return options 45 | 46 | 47 | ################################################################################ 48 | # Classes 49 | 50 | 51 | class AscentMessage(object): 52 | 53 | def __init__(self, errcode, severity, info, filename, lineno): 54 | self.errcode = errcode # ID 55 | self.severity = severity 56 | self.info = info 57 | self.filename = filename 58 | self.lineno = lineno 59 | 60 | self.waived = False 61 | 62 | def __repr__(self): 63 | return "{}:{}:{} {}".format(self.filename, self.lineno, self.errcode, self.info) 64 | 65 | @classmethod 66 | def from_csv(cls, csv_row): 67 | if csv_row['severity'] == "S": 68 | return None 69 | 70 | severity = csv_row['severity'] 71 | errcode = csv_row['rulename'] 72 | info = csv_row['details'] 73 | 74 | match = re.match(r'(\S+):(\d+)', csv_row['file']) 75 | filename = match.group(1) 76 | lineno = match.group(2) 77 | 78 | return cls(errcode, severity, info, filename, lineno) 79 | 80 | 81 | class AscentLintLog(object): 82 | 83 | def __init__(self, path, log): 84 | self.issues = [] 85 | self.files_with_notes = {} 86 | self.dirs_with_notes = {} 87 | self.file_map = {} 88 | 89 | fieldnames = 
['severity', 'rulename', 'file', 'details', 'status', 'comments'] 90 | found_file_map = False 91 | 92 | # Can't use CSV. Need the full logfile because need to grab the file definitions 93 | with open(path, 'r', encoding='utf-8', errors='replace') as logp: 94 | for line in logp: 95 | if line.strip() == "Lint engine run exited with errors upstream. Skipping report.": 96 | log.critical("Ascent failed before it can render a report. Exiting") 97 | match = re.match("([IWE])\s+([A-Z_]+):\s+(\S+):(\d+)\s+(.*)\s+New", line) 98 | if match: 99 | self.issues.append( 100 | AscentMessage(match.group(2), match.group(1), 101 | match.group(5).strip(), match.group(3), int(match.group(4)))) 102 | continue 103 | if line.startswith("File Definitions"): 104 | found_file_map = True 105 | if found_file_map: 106 | match = re.match("([a-zA-Z0-9_]+\.s?vh?(_\d+)?)\s+(.{0,2}\S+)", line) 107 | if match: 108 | self.file_map[match.group(1)] = match.group(3) 109 | 110 | self.infos = [issue for issue in self.issues if issue.severity == 'I'] 111 | self.warnings = [issue for issue in self.issues if issue.severity == 'W'] 112 | self.errors = [issue for issue in self.issues if issue.severity == 'E'] 113 | 114 | self.files_with_notes = set([issue.filename for issue in self.issues]) 115 | 116 | # Each entry in this set is a tuple of (filename, lineno, errcode) 117 | line_waivers = {} 118 | # line_waivers = set() 119 | block_waivers = {} 120 | for filename in self.files_with_notes: 121 | if filename == "": 122 | continue 123 | 124 | # Map from the base path name to the bazel-relative path name to be able to find waivers 125 | relative_filename = self.file_map[filename] 126 | with open(relative_filename, errors='replace') as filep: 127 | for i, line in enumerate(filep.readlines()): 128 | match = LINE_WAIVER_REGEXP.search(line) 129 | if match: 130 | self._handle_line_waiver(line_waivers, filename, i + 1, match.group(1), log) 131 | match = BLOCK_WAIVER_START_REGEXP.match(line) 132 | if match: 133 | self._handle_block_start(block_waivers, filename, i + 1, match.group(1), log) 134 | continue 135 | match = BLOCK_WAIVER_END_REGEXP.match(line) 136 | if match: 137 | self._handle_block_end(block_waivers, filename, i + 1, match.group(1), log) 138 | continue 139 | 140 | self._check_block_waivers(block_waivers, log) 141 | 142 | for issue in self.issues: 143 | if issue.filename in line_waivers and issue.errcode in line_waivers[issue.filename]: 144 | for lineno in line_waivers[issue.filename][issue.errcode]: 145 | if issue.lineno == lineno: 146 | issue.waived = True 147 | continue 148 | # Don't use try/except because this should not succeed in try often 149 | if issue.filename in block_waivers and issue.errcode in block_waivers[issue.filename]: 150 | for line_pair in block_waivers[issue.filename][issue.errcode]: 151 | if issue.lineno > line_pair[0] and issue.lineno < line_pair[1]: 152 | issue.waived = True 153 | 154 | self.prep_file_stats() 155 | 156 | def _handle_line_waiver(self, line_waivers, filename, lineno, match, log): 157 | line_waivers.setdefault(filename, {}) 158 | rules = match.split(',') 159 | for rule in rules: 160 | rule = rule.strip() 161 | line_waivers[filename].setdefault(rule, []) 162 | line_waivers[filename][rule].append(lineno) 163 | continue 164 | 165 | def _handle_block_start(self, block_waivers, filename, lineno, match, log): 166 | block_waivers.setdefault(filename, {}) 167 | rules = match.split(',') 168 | for rule in rules: 169 | rule = rule.strip() 170 | # Check to see if the last 'disable' has a matching 'enable' 171 | 
if rule in block_waivers[filename]: 172 | if block_waivers[filename][rule][-1][1] is None: 173 | log.error("In %s, %s has a 'disable' on line %s and %s without an 'enable' in between", filename, 174 | rule, block_waivers[filename][rule][-1][0], lineno) 175 | else: 176 | # previous disable/enable is coherent so we can add a new entry to the list 177 | block_waivers[filename][rule].append([lineno, None]) 178 | else: 179 | block_waivers[filename][rule] = [[lineno, None]] 180 | 181 | def _handle_block_end(self, block_waivers, filename, lineno, match, log): 182 | if filename not in block_waivers: 183 | log.error("In %s, 'enable' pragmas on line %s for '%s' appears before any 'disable' pragmas", filename, 184 | lineno, match) 185 | return 186 | rules = match.split(',') 187 | for rule in rules: 188 | rule = rule.strip() 189 | if rule not in block_waivers[filename]: 190 | log.error("In %s, 'enable' pragma for %s on line %s appears before any 'disable' pragmas", filename, 191 | rule, lineno) 192 | return 193 | if block_waivers[filename][rule][-1][1] is None: 194 | block_waivers[filename][rule][-1][1] = lineno 195 | else: 196 | log.error( 197 | "In %s, 'enable' pragma for %s on line %s doesn't have a matching 'disable'. Previous ['disable', 'enable'] are on lines %s", 198 | filename, rule, lineno, str(block_waivers[filename][rule][-1])) 199 | 200 | def _check_block_waivers(self, block_waivers, log): 201 | for filename, rule_dict in block_waivers.items(): 202 | for rule, waiver_list in rule_dict.items(): 203 | if waiver_list[-1][1] is None: 204 | log.error("In %s, couldn't find a matching 'enable' for %s. The 'disable' is on line %s", filename, 205 | rule, waiver_list[-1][0]) 206 | # Remove the partial block waiver from the list since it's incomplete 207 | del waiver_list[-1] 208 | 209 | def prep_file_stats(self): 210 | 211 | self.files_with_notes = {} 212 | 213 | for issue in self.issues: 214 | if not issue.waived: 215 | self.files_with_notes.setdefault(issue.filename, 0) 216 | self.files_with_notes[issue.filename] += 1 217 | 218 | def rtl_dir_from_path(file_path): 219 | orig_path = file_path 220 | loop_count = 0 221 | base_dir = None 222 | while os.path.basename(file_path) not in ['rtl', 'analog'] and loop_count <= 10: 223 | base_dir = os.path.basename(file_path) 224 | file_path = os.path.split(file_path)[0] 225 | loop_count += 1 226 | 227 | if loop_count > 10: 228 | log.info("Couldn't resolve base directory for {}".format(orig_path)) 229 | return orig_path 230 | 231 | return os.path.join(file_path, base_dir) 232 | 233 | for issue in self.issues: 234 | if not issue.waived: 235 | rtl_dir = rtl_dir_from_path(issue.filename) 236 | self.dirs_with_notes.setdefault(rtl_dir, 0) 237 | self.dirs_with_notes[rtl_dir] += 1 238 | 239 | def _waived_unwaived(self, level): 240 | issues = getattr(self, level.strip()) 241 | waived = sum([i.waived for i in issues]) 242 | unwaived = len(issues) - waived 243 | if unwaived: 244 | log.error("Found %3d %s (+%3d waived)", unwaived, level, waived) 245 | else: 246 | log.info("Found %3d %s (+%3d waived)", unwaived, level, waived) 247 | 248 | def stats(self): 249 | for info in self.infos: 250 | if not info.waived: 251 | log.error("%s", info) 252 | elif options.show_waived: 253 | log.info("%s", info) 254 | 255 | for warning in self.warnings: 256 | if not warning.waived: 257 | log.error("%s", warning) 258 | elif options.show_waived: 259 | log.info("%s", warning) 260 | 261 | for error in self.errors: 262 | if not error.waived: 263 | log.error("%s", error) 264 | elif options.show_waived: 265 | log.info("%s", error) 266 | 267 | log.debug("The following files have unwaived issues:") 268 | sorted_files = sorted(self.files_with_notes.items(), key=lambda x: x[1]) 269 | for file_tuple in sorted_files: 270 | log.debug("{file_name}: {count}".format(file_name=file_tuple[0], count=file_tuple[1])) 271 | 272 | log.debug("The following directories have unwaived issues:") 273 | sorted_dirs = sorted(self.dirs_with_notes.items(), key=lambda x: x[1]) 274 | for dir_tuple in sorted_dirs: 275 | log.debug("{}: {}".format(dir_tuple[0], dir_tuple[1])) 276 | 277 | self._waived_unwaived('infos') 278 | self._waived_unwaived('warnings') 279 | self._waived_unwaived('errors ') 280 | 281 | 282 | def main(options, log): 283 | try: 284 | newest_lint_log = AscentLintLog("lint.rpt", log) 285 | newest_lint_log.stats() 286 | except Exception as exc: 287 | log.error("Failed to parse lint.rpt: %s", exc) 288 | 289 | log.exit_if_warnings_or_errors("Lint parsing failed due to previous errors") 290 | 291 | 292 | if __name__ == '__main__': 293 | options = parse_args(sys.argv[1:]) 294 | verbosity = cmn_logging.DEBUG if options.tool_debug else cmn_logging.INFO 295 | log = cmn_logging.build_logger("bazel_lint.log", level=verbosity) 296 | main(options, log) 297 | -------------------------------------------------------------------------------- /bin/lint_parser_hal.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Parses results from Cadence HAL lint""" 3 | 4 | ################################################################################ 5 | # stdlib 6 | import argparse 7 | import os 8 | import re 9 | import subprocess 10 | import sys 11 | 12 | ################################################################################ 13 | # Bigger libraries (better to place these later for dependency ordering) 14 | import bs4 15 | 16 | ################################################################################ 17 | # Checkout specific libraries 18 | import cmn_logging 19 | 20 | log = None 21 | 22 | LOG_INDENT = ' ' * 9 23 | 24 | ################################################################################ 25 | # Constants 26 | WAIVER_REGEXP = re.compile(r"\s// lint: disable=(.*)") 27 | 28 | ################################################################################ 29 | # Helpers 30 | 31 | 32 | def parse_args(argv): 33 | parser = argparse.ArgumentParser(description="Parse output report from HAL Lint using inline RTL waivers", 34 | formatter_class=argparse.RawTextHelpFormatter) 35 | 36 | parser.add_argument('--tool-debug', 37 | default=False, 38 | action='store_true', 39 | help='Set the verbosity of this tool to debug level.') 40 | parser.add_argument("--bazel-target", default="lint_top", help="bazel target to use for lint") 41 | parser.add_argument("--sw", 42 | dest="show_waived", 43 | default=False, 44 | action='store_true', 45 | help='Show previously waived messages.') 46 | parser.add_argument("--sh", 47 | dest="show_help", 48 | action='store_true', 49 | help="Display the help message from hal for each individual issue") 50 | parser.add_argument("--waiver-direct", default=r"(?!)", 51 | help="Direct waiver regex for when inline pragmas and design_info are insufficient") 52 | options = parser.parse_args(argv) 53 | return options 54 | 55 | 56 | def find_bazel_runfiles(relpath, bazel_target): 57 | p = subprocess.Popen("bazel info", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 58 | p.wait() 59 | assert p.returncode == 0 60 | stdout, stderr =
p.communicate() 61 | bazel_bin = re.search("bazel-bin: (.*)", stdout.decode('ascii')).group(1) 62 | runfiles_main = os.path.join(bazel_bin, relpath, "{}.runfiles".format(bazel_target), "__main__") 63 | return runfiles_main 64 | 65 | 66 | ################################################################################ 67 | # Classes 68 | 69 | 70 | class HalMessage(object): 71 | 72 | def __init__(self, errcode, severity, info, source_line, filename, lineno, help_msg): 73 | self.errcode = errcode # ID 74 | self.severity = severity 75 | self.info = info 76 | self.source_line = source_line 77 | self.filename = filename 78 | self.lineno = lineno 79 | self.help_msg = help_msg 80 | 81 | self.waived = False 82 | 83 | def __repr__(self): 84 | message = "{}:{}:{} {}\n{}{}".format(self.filename, self.lineno, self.errcode, self.info, LOG_INDENT, 85 | self.source_line) 86 | if options.show_help: 87 | message += "\n\n{}{}".format(LOG_INDENT, self.help_msg) 88 | return message 89 | 90 | @classmethod 91 | def from_soup(cls, soup): 92 | errcode = soup.id.text.strip() 93 | severity = soup.severity.text.strip() 94 | info = soup.info.text.strip() 95 | try: 96 | source_line = soup.source_line.text.strip() 97 | except AttributeError: 98 | source_line = "" 99 | try: 100 | file_info = soup.file_info.text.strip() 101 | match = re.search('{"([^"]+)" ([0-9]+) [0-9]+}', file_info) 102 | filename = match.group(1) 103 | lineno = match.group(2) 104 | except AttributeError: 105 | filename = "" 106 | lineno = "" 107 | try: 108 | help_msg = soup.help.text.strip() 109 | except AttributeError: 110 | help_msg = "" 111 | 112 | return cls(errcode, severity, info, source_line, filename, lineno, help_msg) 113 | 114 | 115 | class HalLintLog(object): 116 | 117 | def __init__(self, path, waiver_direct): 118 | self.issues = [] 119 | self.files_with_notes = {} 120 | self.dirs_with_notes = {} 121 | self.waiver_direct_regex = re.compile(waiver_direct) 122 | 123 | with open(path, 'r', encoding='utf-8', errors='replace') as logp: 124 | text = logp.read() 125 | 126 | # Cadence uses cdata in their xml output, need to avoid lxml parser which strips it out 127 | soup = bs4.BeautifulSoup(text, "html.parser") 128 | messages = soup.findAll('message') 129 | 130 | for message in messages: 131 | self.issues.append(HalMessage.from_soup(message)) 132 | 133 | # Ignore notes, not sure if we should do this or flag these as well 134 | self.issues = [i for i in self.issues if i.severity != 'info'] 135 | 136 | self.warnings = [issue for issue in self.issues if issue.severity == 'warning'] 137 | self.errors = [issue for issue in self.issues if issue.severity == 'error'] 138 | self.fatals = [issue for issue in self.issues if issue.severity == 'fatal'] 139 | 140 | self.files_with_notes = set([issue.filename for issue in self.issues]) 141 | 142 | # Each entry in this set is a tuple of (filename, lineno, errcode) 143 | waivers = set() 144 | for filename in self.files_with_notes: 145 | if filename == "": 146 | continue 147 | 148 | with open(filename, errors='replace') as filep: 149 | for i, line in enumerate(filep.readlines()): 150 | match = WAIVER_REGEXP.search(line) 151 | if match: 152 | rules = match.group(1).split(',') 153 | for rule in rules: 154 | waivers.add((filename, str(i + 1), rule.strip())) 155 | 156 | for issue in self.issues: 157 | # Only apply a direct waiver if the filename and lineno are empty, meaning HAL didn't render the error correctly 158 | if (issue.filename, issue.lineno, issue.errcode) in waivers: 159 | issue.waived = True 160 | elif 
issue.filename == "" and issue.lineno == "" and self.waiver_direct_regex.search(issue.info): 161 | issue.waived = True 162 | 163 | self.prep_file_stats() 164 | 165 | def prep_file_stats(self): 166 | 167 | self.files_with_notes = {} 168 | 169 | for issue in self.issues: 170 | if not issue.waived: 171 | self.files_with_notes.setdefault(issue.filename, 0) 172 | self.files_with_notes[issue.filename] += 1 173 | 174 | def rtl_dir_from_path(file_path): 175 | orig_path = file_path 176 | loop_count = 0 177 | base_dir = None 178 | while os.path.basename(file_path) not in ['rtl', 'analog'] and loop_count <= 10: 179 | base_dir = os.path.basename(file_path) 180 | file_path = os.path.split(file_path)[0] 181 | loop_count += 10 182 | 183 | if loop_count == 10: 184 | log.info("Couldn't resolve base directory for {}".format(orig_path)) 185 | return orig_path 186 | 187 | return os.path.join(file_path, base_dir) 188 | 189 | for issue in self.issues: 190 | if not issue.waived: 191 | rtl_dir = rtl_dir_from_path(issue.filename) 192 | self.dirs_with_notes.setdefault(rtl_dir, 0) 193 | self.dirs_with_notes[rtl_dir] += 1 194 | 195 | def _waived_unwaived(self, level): 196 | issues = getattr(self, level.strip()) 197 | waived = sum([i.waived for i in issues]) 198 | unwaived = len(issues) - waived 199 | if unwaived: 200 | log.error("Found %3d %s (+%3d waived)", unwaived, level, waived) 201 | else: 202 | log.info("Found %3d %s (+%3d waived)", unwaived, level, waived) 203 | 204 | def stats(self): 205 | for warning in self.warnings: 206 | if not warning.waived: 207 | log.warn("%s", warning) 208 | elif options.show_waived: 209 | log.info("%s", warning) 210 | 211 | for error in self.errors: 212 | if not error.waived: 213 | log.error("%s", error) 214 | elif options.show_waived: 215 | log.info("%s", error) 216 | 217 | for fatal in self.fatals: 218 | log.error("%s", fatal) 219 | 220 | log.info("The following files have unwaived issues:") 221 | sorted_files = sorted(self.files_with_notes.items(), key=lambda x: x[1]) 222 | for file_tuple in sorted_files: 223 | log.info("{file_name}: {count}".format(file_name=file_tuple[0], count=file_tuple[1])) 224 | 225 | log.info("The following directories have unwaived issues:") 226 | sorted_dirs = sorted(self.dirs_with_notes.items(), key=lambda x: x[1]) 227 | for dir_tuple in sorted_dirs: 228 | log.info("{}: {}".format(dir_tuple[0], dir_tuple[1])) 229 | 230 | self._waived_unwaived('warnings') 231 | self._waived_unwaived('errors ') 232 | self._waived_unwaived('fatals ') 233 | 234 | 235 | def main(options, log): 236 | xml_logfile = "xrun.log.xml" 237 | text_logfile = "xrun.log" 238 | 239 | if not os.path.exists(xml_logfile): 240 | log.error("XML logfile doesn't exist, something probably went pretty wrong earlier") 241 | elif os.path.getsize(xml_logfile) == 0: 242 | log.error("XML Logfile was 0 bytes, something probably went pretty wrong earlier") 243 | 244 | log.info("Text Logfile: %s", text_logfile) 245 | log.info("XML Logfile: %s", xml_logfile) 246 | 247 | try: 248 | newest_lint_log = HalLintLog(xml_logfile, options.waiver_direct) 249 | newest_lint_log.stats() 250 | except Exception as exc: 251 | log.error("Failed to parse lint log file: %s", exc) 252 | 253 | log.exit_if_warnings_or_errors("Previous errors") 254 | 255 | 256 | if __name__ == '__main__': 257 | options = parse_args(sys.argv[1:]) 258 | verbosity = cmn_logging.DEBUG if options.tool_debug else cmn_logging.INFO 259 | log = cmn_logging.build_logger("lint", level=verbosity) 260 | main(options, log) 261 | 
-------------------------------------------------------------------------------- /deps.bzl: -------------------------------------------------------------------------------- 1 | """External dependencies for rules_verilog""" 2 | 3 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 4 | 5 | def _maybe(repo_rule, name, **kwargs): 6 | if name not in native.existing_rules(): 7 | repo_rule(name = name, **kwargs) 8 | 9 | def verilog_dependencies(): 10 | _maybe( 11 | repo_rule = http_archive, 12 | name = "rules_python", 13 | url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz", 14 | sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", 15 | ) 16 | -------------------------------------------------------------------------------- /docs/BUILD: -------------------------------------------------------------------------------- 1 | load("@io_bazel_stardoc//stardoc:stardoc.bzl", "stardoc") 2 | load("@bazel_skylib//:bzl_library.bzl", "bzl_library") 3 | 4 | bzl_library( 5 | name = "defs_rules", 6 | srcs = [ 7 | "@rules_verilog//verilog:defs.bzl", 8 | "@rules_verilog//verilog/private:dv.bzl", 9 | "@rules_verilog//verilog/private:rtl.bzl", 10 | "@rules_verilog//verilog/private:verilog.bzl", 11 | ], 12 | ) 13 | 14 | stardoc( 15 | name = "defs_docs", 16 | out = "defs.md", 17 | input = "@rules_verilog//verilog:defs.bzl", 18 | deps = [":defs_rules"], 19 | ) 20 | -------------------------------------------------------------------------------- /env/.style.yapf: -------------------------------------------------------------------------------- 1 | # -*- toml -*- 2 | [style] 3 | # Align closing bracket with visual indentation. 4 | align_closing_bracket_with_visual_indent=True 5 | 6 | # Allow dictionary keys to exist on multiple lines. For example: 7 | # 8 | # x = { 9 | # ('this is the first element of a tuple', 10 | # 'this is the second element of a tuple'): 11 | # value, 12 | # } 13 | allow_multiline_dictionary_keys=False 14 | 15 | # Allow lambdas to be formatted on more than one line. 16 | allow_multiline_lambdas=False 17 | 18 | # Allow splitting before a default / named assignment in an argument list. 19 | allow_split_before_default_or_named_assigns=True 20 | 21 | # Allow splits before the dictionary value. 22 | allow_split_before_dict_value=True 23 | 24 | # Let spacing indicate operator precedence. For example: 25 | # 26 | # a = 1 * 2 + 3 / 4 27 | # b = 1 / 2 - 3 * 4 28 | # c = (1 + 2) * (3 - 4) 29 | # d = (1 - 2) / (3 + 4) 30 | # e = 1 * 2 - 3 31 | # f = 1 + 2 + 3 + 4 32 | # 33 | # will be formatted as follows to indicate precedence: 34 | # 35 | # a = 1*2 + 3/4 36 | # b = 1/2 - 3*4 37 | # c = (1+2) * (3-4) 38 | # d = (1-2) / (3+4) 39 | # e = 1*2 - 3 40 | # f = 1 + 2 + 3 + 4 41 | # 42 | arithmetic_precedence_indication=False 43 | 44 | # Number of blank lines surrounding top-level function and class 45 | # definitions. 46 | blank_lines_around_top_level_definition=2 47 | 48 | # Insert a blank line before a class-level docstring. 49 | blank_line_before_class_docstring=False 50 | 51 | # Insert a blank line before a module docstring. 52 | blank_line_before_module_docstring=False 53 | 54 | # Insert a blank line before a 'def' or 'class' immediately nested 55 | # within another 'def' or 'class'. For example: 56 | # 57 | # class Foo: 58 | # # <------ this blank line 59 | # def method(): 60 | # ... 61 | blank_line_before_nested_class_or_def=True 62 | 63 | # Do not split consecutive brackets. 
Only relevant when 64 | # dedent_closing_brackets is set. For example: 65 | # 66 | # call_func_that_takes_a_dict( 67 | # { 68 | # 'key1': 'value1', 69 | # 'key2': 'value2', 70 | # } 71 | # ) 72 | # 73 | # would reformat to: 74 | # 75 | # call_func_that_takes_a_dict({ 76 | # 'key1': 'value1', 77 | # 'key2': 'value2', 78 | # }) 79 | coalesce_brackets=False 80 | 81 | # The column limit. 82 | column_limit=120 83 | 84 | # The style for continuation alignment. Possible values are: 85 | # 86 | # - SPACE: Use spaces for continuation alignment. This is default behavior. 87 | # - FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns 88 | # (ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs or 89 | # CONTINUATION_INDENT_WIDTH spaces) for continuation alignment. 90 | # - VALIGN-RIGHT: Vertically align continuation lines to multiple of 91 | # INDENT_WIDTH columns. Slightly right (one tab or a few spaces) if 92 | # cannot vertically align continuation lines with indent characters. 93 | continuation_align_style=SPACE 94 | 95 | # Indent width used for line continuations. 96 | continuation_indent_width=4 97 | 98 | # Put closing brackets on a separate line, dedented, if the bracketed 99 | # expression can't fit in a single line. Applies to all kinds of brackets, 100 | # including function definitions and calls. For example: 101 | # 102 | # config = { 103 | # 'key1': 'value1', 104 | # 'key2': 'value2', 105 | # } # <--- this bracket is dedented and on a separate line 106 | # 107 | # time_series = self.remote_client.query_entity_counters( 108 | # entity='dev3246.region1', 109 | # key='dns.query_latency_tcp', 110 | # transform=Transformation.AVERAGE(window=timedelta(seconds=60)), 111 | # start_ts=now()-timedelta(days=3), 112 | # end_ts=now(), 113 | # ) # <--- this bracket is dedented and on a separate line 114 | dedent_closing_brackets=False 115 | 116 | # Disable the heuristic which places each list element on a separate line 117 | # if the list is comma-terminated. 118 | disable_ending_comma_heuristic=False 119 | 120 | # Place each dictionary entry onto its own line. 121 | each_dict_entry_on_separate_line=True 122 | 123 | # Require multiline dictionary even if it would normally fit on one line. 124 | # For example: 125 | # 126 | # config = { 127 | # 'key1': 'value1' 128 | # } 129 | force_multiline_dict=False 130 | 131 | # The regex for an i18n comment. The presence of this comment stops 132 | # reformatting of that line, because the comments are required to be 133 | # next to the string they translate. 134 | i18n_comment= 135 | 136 | # The i18n function call names. The presence of this function stops 137 | # reformattting on that line, because the string it has cannot be moved 138 | # away from the i18n comment. 139 | i18n_function_call= 140 | 141 | # Indent blank lines. 142 | indent_blank_lines=False 143 | 144 | # Put closing brackets on a separate line, indented, if the bracketed 145 | # expression can't fit in a single line. Applies to all kinds of brackets, 146 | # including function definitions and calls. 
For example: 147 | # 148 | # config = { 149 | # 'key1': 'value1', 150 | # 'key2': 'value2', 151 | # } # <--- this bracket is indented and on a separate line 152 | # 153 | # time_series = self.remote_client.query_entity_counters( 154 | # entity='dev3246.region1', 155 | # key='dns.query_latency_tcp', 156 | # transform=Transformation.AVERAGE(window=timedelta(seconds=60)), 157 | # start_ts=now()-timedelta(days=3), 158 | # end_ts=now(), 159 | # ) # <--- this bracket is indented and on a separate line 160 | indent_closing_brackets=False 161 | 162 | # Indent the dictionary value if it cannot fit on the same line as the 163 | # dictionary key. For example: 164 | # 165 | # config = { 166 | # 'key1': 167 | # 'value1', 168 | # 'key2': value1 + 169 | # value2, 170 | # } 171 | indent_dictionary_value=False 172 | 173 | # The number of columns to use for indentation. 174 | indent_width=4 175 | 176 | # Join short lines into one line. E.g., single line 'if' statements. 177 | join_multiple_lines=True 178 | 179 | # Do not include spaces around selected binary operators. For example: 180 | # 181 | # 1 + 2 * 3 - 4 / 5 182 | # 183 | # will be formatted as follows when configured with "*,/": 184 | # 185 | # 1 + 2*3 - 4/5 186 | no_spaces_around_selected_binary_operators= 187 | 188 | # Use spaces around default or named assigns. 189 | spaces_around_default_or_named_assign=False 190 | 191 | # Adds a space after the opening '{' and before the ending '}' dict delimiters. 192 | # 193 | # {1: 2} 194 | # 195 | # will be formatted as: 196 | # 197 | # { 1: 2 } 198 | spaces_around_dict_delimiters=False 199 | 200 | # Adds a space after the opening '[' and before the ending ']' list delimiters. 201 | # 202 | # [1, 2] 203 | # 204 | # will be formatted as: 205 | # 206 | # [ 1, 2 ] 207 | spaces_around_list_delimiters=False 208 | 209 | # Use spaces around the power operator. 210 | spaces_around_power_operator=False 211 | 212 | # Use spaces around the subscript / slice operator. For example: 213 | # 214 | # my_list[1 : 10 : 2] 215 | spaces_around_subscript_colon=False 216 | 217 | # Adds a space after the opening '(' and before the ending ')' tuple delimiters. 218 | # 219 | # (1, 2, 3) 220 | # 221 | # will be formatted as: 222 | # 223 | # ( 1, 2, 3 ) 224 | spaces_around_tuple_delimiters=False 225 | 226 | # The number of spaces required before a trailing comment. 227 | # This can be a single value (representing the number of spaces 228 | # before each trailing comment) or list of values (representing 229 | # alignment column values; trailing comments within a block will 230 | # be aligned to the first column value that is greater than the maximum 231 | # line length within the block). 
For example: 232 | # 233 | # With spaces_before_comment=5: 234 | # 235 | # 1 + 1 # Adding values 236 | # 237 | # will be formatted as: 238 | # 239 | # 1 + 1 # Adding values <-- 5 spaces between the end of the statement and comment 240 | # 241 | # With spaces_before_comment=15, 20: 242 | # 243 | # 1 + 1 # Adding values 244 | # two + two # More adding 245 | # 246 | # longer_statement # This is a longer statement 247 | # short # This is a shorter statement 248 | # 249 | # a_very_long_statement_that_extends_beyond_the_final_column # Comment 250 | # short # This is a shorter statement 251 | # 252 | # will be formatted as: 253 | # 254 | # 1 + 1 # Adding values <-- end of line comments in block aligned to col 15 255 | # two + two # More adding 256 | # 257 | # longer_statement # This is a longer statement <-- end of line comments in block aligned to col 20 258 | # short # This is a shorter statement 259 | # 260 | # a_very_long_statement_that_extends_beyond_the_final_column # Comment <-- the end of line comments are aligned based on the line length 261 | # short # This is a shorter statement 262 | # 263 | spaces_before_comment=1 264 | 265 | # Insert a space between the ending comma and closing bracket of a list, 266 | # etc. 267 | space_between_ending_comma_and_closing_bracket=True 268 | 269 | # Use spaces inside brackets, braces, and parentheses. For example: 270 | # 271 | # method_call( 1 ) 272 | # my_dict[ 3 ][ 1 ][ get_index( *args, **kwargs ) ] 273 | # my_set = { 1, 2, 3 } 274 | space_inside_brackets=False 275 | 276 | # Split before arguments 277 | split_all_comma_separated_values=False 278 | 279 | # Split before arguments, but do not split all subexpressions recursively 280 | # (unless needed). 281 | split_all_top_level_comma_separated_values=False 282 | 283 | # Split before arguments if the argument list is terminated by a 284 | # comma. 285 | split_arguments_when_comma_terminated=False 286 | 287 | # Set to True to prefer splitting before '+', '-', '*', '/', '//', or '@' 288 | # rather than after. 289 | split_before_arithmetic_operator=False 290 | 291 | # Set to True to prefer splitting before '&', '|' or '^' rather than 292 | # after. 293 | split_before_bitwise_operator=True 294 | 295 | # Split before the closing bracket if a list or dict literal doesn't fit on 296 | # a single line. 297 | split_before_closing_bracket=True 298 | 299 | # Split before a dictionary or set generator (comp_for). For example, note 300 | # the split before the 'for': 301 | # 302 | # foo = { 303 | # variable: 'Hello world, have a nice day!' 304 | # for variable in bar if variable != 42 305 | # } 306 | split_before_dict_set_generator=True 307 | 308 | # Split before the '.' if we need to split a longer expression: 309 | # 310 | # foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) 311 | # 312 | # would reformat to something like: 313 | # 314 | # foo = ('This is a really long string: {}, {}, {}, {}' 315 | # .format(a, b, c, d)) 316 | split_before_dot=False 317 | 318 | # Split after the opening paren which surrounds an expression if it doesn't 319 | # fit on a single line. 320 | split_before_expression_after_opening_paren=False 321 | 322 | # If an argument / parameter list is going to be split, then split before 323 | # the first argument. 324 | split_before_first_argument=False 325 | 326 | # Set to True to prefer splitting before 'and' or 'or' rather than 327 | # after. 328 | split_before_logical_operator=True 329 | 330 | # Split named assignments onto individual lines. 
331 | split_before_named_assigns=True 332 | 333 | # Set to True to split list comprehensions and generators that have 334 | # non-trivial expressions and multiple clauses before each of these 335 | # clauses. For example: 336 | # 337 | # result = [ 338 | # a_long_var + 100 for a_long_var in xrange(1000) 339 | # if a_long_var % 10] 340 | # 341 | # would reformat to something like: 342 | # 343 | # result = [ 344 | # a_long_var + 100 345 | # for a_long_var in xrange(1000) 346 | # if a_long_var % 10] 347 | split_complex_comprehension=False 348 | 349 | # The penalty for splitting right after the opening bracket. 350 | split_penalty_after_opening_bracket=300 351 | 352 | # The penalty for splitting the line after a unary operator. 353 | split_penalty_after_unary_operator=10000 354 | 355 | # The penalty of splitting the line around the '+', '-', '*', '/', '//', 356 | # ``%``, and '@' operators. 357 | split_penalty_arithmetic_operator=300 358 | 359 | # The penalty for splitting right before an if expression. 360 | split_penalty_before_if_expr=0 361 | 362 | # The penalty of splitting the line around the '&', '|', and '^' 363 | # operators. 364 | split_penalty_bitwise_operator=300 365 | 366 | # The penalty for splitting a list comprehension or generator 367 | # expression. 368 | split_penalty_comprehension=80 369 | 370 | # The penalty for characters over the column limit. 371 | split_penalty_excess_character=7000 372 | 373 | # The penalty incurred by adding a line split to the unwrapped line. The 374 | # more line splits added the higher the penalty. 375 | split_penalty_for_added_line_split=30 376 | 377 | # The penalty of splitting a list of "import as" names. For example: 378 | # 379 | # from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, 380 | # long_argument_2, 381 | # long_argument_3) 382 | # 383 | # would reformat to something like: 384 | # 385 | # from a_very_long_or_indented_module_name_yada_yad import ( 386 | # long_argument_1, long_argument_2, long_argument_3) 387 | split_penalty_import_names=0 388 | 389 | # The penalty of splitting the line around the 'and' and 'or' 390 | # operators. 391 | split_penalty_logical_operator=300 392 | 393 | # Use the Tab character for indentation. 394 | use_tabs=False 395 | 396 | -------------------------------------------------------------------------------- /env/azure_pipeline.yaml: -------------------------------------------------------------------------------- 1 | # Triggers are currently handled by branch policy for build validation in azure devops 2 | trigger: none 3 | # trigger: 4 | # batch: true 5 | # branches: 6 | # include: 7 | # - master 8 | 9 | stages: 10 | - stage: 'all' 11 | jobs: 12 | - job: 'CI_no_license' 13 | pool: 14 | name: CI_no_license 15 | steps: 16 | # Print the pwd to make it easier to find the area where this was run 17 | - script: | 18 | pwd 19 | displayName: 'Display PWD' 20 | 21 | - script: | 22 | source activate digital 23 | yapf --diff --recursive . --style env/.style.yapf 24 | # Do an automatic regen with 25 | # yapf --in-place --recursive . 
--style env/.style.yapf 26 | displayName: 'yapf Check' 27 | 28 | - script: | 29 | source activate digital 30 | bazel run --nokeep_state_after_build //tests:buildifier_format_diff 31 | bazel run --nokeep_state_after_build //tests:buildifier_lint 32 | displayName: 'BUILD Lint Check' 33 | 34 | - script: | 35 | bazel clean --expunge 36 | ./tests/doc_test.sh 37 | displayName: 'Doc Generation Check' 38 | 39 | - script: | 40 | source activate digital 41 | bazel test --cache_test_results=no --jobs 8 --test_output=all $(bazel query "tests(//...) except attr(tags, 'no_ci_gate', //...) except attr(tags, 'requires_license', //...)") 42 | displayName: 'Bazel tests (no licenses)' 43 | 44 | - task: PublishTestResults@2 45 | condition: succeededOrFailed() 46 | inputs: 47 | testResultsFiles: 'bazel-testlogs/**/*.xml' 48 | testRunTitle: 'Publish test results.' 49 | displayName: 'Bazel Tests' 50 | 51 | - job: 'CI' 52 | pool: 53 | name: CI 54 | steps: 55 | # Print the pwd to make it easier to find the area where this was run 56 | - script: | 57 | pwd 58 | displayName: 'Display PWD' 59 | 60 | - bash: | 61 | source activate digital 62 | find -L `bazel info bazel-testlogs` -name "test.xml" | xargs rm 63 | bazel test --cache_test_results=no --jobs 8 --test_output=all $(bazel query "attr(tags, 'requires_license', tests(//...)) except attr(tags, 'no_ci_gate', //...)") 64 | displayName: 'Bazel Tests' 65 | 66 | - task: PublishTestResults@2 67 | condition: succeededOrFailed() 68 | inputs: 69 | testResultsFiles: 'bazel-testlogs/**/*.xml' 70 | testRunTitle: 'Publish test results.' 71 | displayName: 'Bazel Tests Publish Results' 72 | -------------------------------------------------------------------------------- /examples/apb/BUILD: -------------------------------------------------------------------------------- 1 | load("@rules_verilog//verilog:defs.bzl", "verilog_rtl_library", "verilog_rtl_pkg", "verilog_rtl_unit_test") 2 | 3 | verilog_rtl_pkg( 4 | name = "apb_pkg", 5 | direct = ["apb_pkg.sv"], 6 | ) 7 | 8 | verilog_rtl_library( 9 | name = "apb", 10 | modules = [ 11 | "apb.sv", 12 | ], 13 | deps = [ 14 | ":apb_pkg", 15 | ], 16 | ) 17 | 18 | verilog_rtl_library( 19 | name = "unit_test_top", 20 | modules = ["unit_test_top.sv"], 21 | deps = [":apb"], 22 | ) 23 | 24 | verilog_rtl_unit_test( 25 | name = "test", 26 | tags = [ 27 | "lic_xcelium", 28 | "requires_license", 29 | ], 30 | deps = [":unit_test_top"], 31 | ) 32 | -------------------------------------------------------------------------------- /examples/apb/apb.sv: -------------------------------------------------------------------------------- 1 | module apb; 2 | 3 | initial begin 4 | $display("apb addr_width:", apb_pkg::ADDR_WIDTH); 5 | end 6 | 7 | endmodule : apb 8 | -------------------------------------------------------------------------------- /examples/apb/apb_pkg.sv: -------------------------------------------------------------------------------- 1 | package apb_pkg; 2 | 3 | localparam ADDR_WIDTH=12; 4 | 5 | endpackage : apb_pkg 6 | -------------------------------------------------------------------------------- /examples/apb/unit_test_top.sv: -------------------------------------------------------------------------------- 1 | module unit_test_top (); 2 | 3 | apb u_apb(); 4 | 5 | endmodule : unit_test_top 6 | -------------------------------------------------------------------------------- /examples/dpi/BUILD: -------------------------------------------------------------------------------- 1 | load("@rules_verilog//verilog:defs.bzl", "verilog_dv_library", 
"verilog_dv_unit_test", "verilog_rtl_library") 2 | load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") 3 | 4 | cc_library( 5 | name = "dpi_c", 6 | srcs = ["dpi.c"], 7 | hdrs = ["dpi.h"], 8 | ) 9 | 10 | cc_test( 11 | name = "dpi_c_test", 12 | srcs = ["dpi_test.c"], 13 | deps = [":dpi_c"], 14 | ) 15 | 16 | verilog_dv_library( 17 | name = "pkg", 18 | srcs = glob( 19 | ["*.sv"], 20 | exclude = ["unit_test_top.sv"], 21 | ), 22 | dpi = [":dpi_c"], 23 | in_flist = ["dpi_pkg.sv"], 24 | ) 25 | 26 | verilog_rtl_library( 27 | name = "unit_test_top", 28 | direct = ["unit_test_top.sv"], 29 | deps = [":pkg"], 30 | ) 31 | 32 | verilog_dv_unit_test( 33 | name = "test", 34 | tags = [ 35 | "lic_xcelium", 36 | "requires_license", 37 | ], 38 | deps = [":unit_test_top"], 39 | ) 40 | -------------------------------------------------------------------------------- /examples/dpi/dpi.c: -------------------------------------------------------------------------------- 1 | // A trivial example showing how to pull in C through the DPI. 2 | 3 | #include 4 | 5 | void echo_hello() { 6 | printf("Inside echo_hello in C\n"); 7 | } 8 | -------------------------------------------------------------------------------- /examples/dpi/dpi.h: -------------------------------------------------------------------------------- 1 | void echo_hello(); 2 | -------------------------------------------------------------------------------- /examples/dpi/dpi_pkg.sv: -------------------------------------------------------------------------------- 1 | package dpi_pkg; 2 | 3 | import "DPI-C" function void echo_hello(); 4 | 5 | endpackage : dpi_pkg 6 | -------------------------------------------------------------------------------- /examples/dpi/dpi_test.c: -------------------------------------------------------------------------------- 1 | // Example of testing C code without SV 2 | 3 | #include 4 | 5 | #include "dpi.h" 6 | 7 | int main() { 8 | // Pretend there are unit tests here 9 | echo_hello(); 10 | return 0; 11 | } 12 | -------------------------------------------------------------------------------- /examples/dpi/unit_test_top.sv: -------------------------------------------------------------------------------- 1 | module unit_test_top; 2 | 3 | initial begin 4 | $display("In unit_test_top initial block."); 5 | dpi_pkg::echo_hello(); 6 | end 7 | 8 | endmodule : unit_test_top 9 | -------------------------------------------------------------------------------- /lib/BUILD: -------------------------------------------------------------------------------- 1 | load("@rules_python//python:defs.bzl", "py_library") 2 | 3 | package(default_visibility = ["//visibility:public"]) 4 | 5 | py_library( 6 | name = "cmn_logging", 7 | srcs = ["cmn_logging.py"], 8 | ) 9 | 10 | py_library( 11 | name = "job_lib", 12 | srcs = ["job_lib.py"], 13 | ) 14 | 15 | py_library( 16 | name = "parser_actions", 17 | srcs = ["parser_actions.py"], 18 | ) 19 | 20 | py_library( 21 | name = "regression", 22 | srcs = ["regression.py"], 23 | deps = ["rv_utils"], 24 | ) 25 | 26 | py_library( 27 | name = "rv_utils", 28 | srcs = ["rv_utils.py"], 29 | ) 30 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Lightelligence/rules_verilog/187ba1223b2eec2a10d533eb3a13bfbdc2911676/lib/__init__.py -------------------------------------------------------------------------------- /lib/cmn_logging.py: 
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Add new functionality to allow logger to exit."""
3 |
4 | import logging
5 | import sys
6 | from datetime import datetime
7 |
8 | CRITICAL = logging.CRITICAL
9 | FATAL = logging.FATAL
10 | ERROR = logging.ERROR
11 | WARNING = logging.WARNING
12 | WARN = logging.WARN
13 | INFO = logging.INFO
14 | DEBUG = logging.DEBUG
15 | NOTSET = logging.NOTSET
16 |
17 | # Override standard level display names to be fixed width
18 | LEVEL_NAMES = {'INFO': '%I', 'DEBUG': '%D', 'ERROR': '%E', 'WARNING': '%W', 'CRITICAL': '%F'}
19 |
20 | COLOR_RED = '\033[0;31m'
21 | COLOR_NC = '\033[0m'
22 |
23 | # pylint: disable=bad-whitespace
24 | LEVEL_TO_COLOR = {
25 |     'INFO': ("", ""),
26 |     'DEBUG': ("", ""),
27 |     'ERROR': (COLOR_RED, COLOR_NC),
28 |     'WARNING': (COLOR_RED, COLOR_NC),
29 |     'CRITICAL': (COLOR_RED, COLOR_NC)
30 | }
31 | # pylint: enable=bad-whitespace
32 |
33 |
34 | class CmnLogger(logging.getLoggerClass()):
35 |     """Extended logger that tracks error counts."""
36 |
37 |     def __init__(self, *args, **kwargs):
38 |         self.warn_count = 0
39 |         self.error_count = 0
40 |         super(CmnLogger, self).__init__(*args, **kwargs)
41 |         self.reset_stopwatch()
42 |         self._last_message_was_summary = False
43 |
44 |     def exit_if_warnings_or_errors(self, *args, **kwargs):
45 |         """Log a critical message if any errors or warnings have occurred."""
46 |         if self.warn_count or self.error_count:
47 |             self.critical(*args, **kwargs)
48 |
49 |     def debug(self, *args, **kwargs):  # pylint: disable=missing-docstring
50 |         self._last_message_was_summary = False
51 |         super(CmnLogger, self).debug(*args, **kwargs)
52 |
53 |     def info(self, *args, **kwargs):  # pylint: disable=missing-docstring
54 |         self._last_message_was_summary = False
55 |         super(CmnLogger, self).info(*args, **kwargs)
56 |
57 |     def warn(self, *args, **kwargs):  # pylint: disable=missing-docstring
58 |         self.warn_count += 1
59 |         self._last_message_was_summary = False
60 |         super(CmnLogger, self).warn(*args, **kwargs)
61 |
62 |     def error(self, *args, **kwargs):  # pylint: disable=missing-docstring
63 |         self.error_count += 1
64 |         self._last_message_was_summary = False
65 |         super(CmnLogger, self).error(*args, **kwargs)
66 |
67 |     def critical(self, *args, **kwargs):
68 |         """Script will exit with bad status if called."""
69 |         self._last_message_was_summary = False
70 |         super(CmnLogger, self).critical(*args, **kwargs)
71 |         sys.exit(1)
72 |
73 |     def summary(self, *args, **kwargs):
74 |         """Add some decoration to make a line pop a bit more."""
75 |         if not self._last_message_was_summary:
76 |             self.info('-' * 72)
77 |         self.info(*args, **kwargs)
78 |         self.info('-' * 72)
79 |         self._last_message_was_summary = True
80 |
81 |     def reset_stopwatch(self):
82 |         self._start_time = datetime.now()
83 |
84 |     def stop_stopwatch(self):
85 |         self._stop_time = datetime.now()
86 |
87 |     @property
88 |     def start_time(self):
89 |         return self._start_time
90 |
91 |     @property
92 |     def stop_time(self):
93 |         return self._stop_time
94 |
95 |     @property
96 |     def duration(self):
97 |         return self.stop_time - self.start_time
98 |
99 |     @property
100 |     def duration_in_microseconds(self):
101 |         return self.duration.seconds * (10**6) + self.duration.microseconds
102 |
103 |     @property
104 |     def timestamp(self):
105 |         return datetime.now()
106 |
107 |     @property
108 |     def timestamp_delta(self):
109 |         return datetime.now() - self._start_time
110 |
111 |     @property
112 |     def timestamp_in_microseconds(self):
113 |         delta =
self.timestamp_delta 114 | return delta.seconds * (10**6) + delta.microseconds 115 | 116 | 117 | class CmnFormatter(logging.Formatter): 118 | """Provide hook for translation from record level to color.""" 119 | 120 | def __init__(self, *args, **kwargs): 121 | self.use_color = kwargs['use_color'] 122 | del kwargs['use_color'] 123 | logging.Formatter.__init__(self, *args, **kwargs) 124 | 125 | def format(self, record): 126 | if self.use_color: 127 | record.color_start, record.color_end = LEVEL_TO_COLOR[record.levelname] 128 | else: 129 | record.color_start, record.color_end = "", "" 130 | # record.levelname = LEVEL_NAMES[record.levelname] 131 | return super(CmnFormatter, self).format(record) 132 | 133 | 134 | def build_logger(name, level=logging.INFO, use_color=False, filehandler=None): 135 | """Create a logger, console handler and formatter. 136 | Do this in one function to allow for more uniformity across tools. 137 | """ 138 | logging.setLoggerClass(CmnLogger) 139 | 140 | # Override standard level display names to be fixed width 141 | logging.addLevelName(INFO, "%I") # pylint: disable=bad-whitespace 142 | logging.addLevelName(DEBUG, "%D") # pylint: disable=bad-whitespace 143 | logging.addLevelName(ERROR, "%E") # pylint: disable=bad-whitespace 144 | logging.addLevelName(WARNING, "%W") # pylint: disable=bad-whitespace 145 | logging.addLevelName(CRITICAL, "%F") # pylint: disable=bad-whitespace 146 | 147 | log = logging.getLogger(name) 148 | if filehandler: 149 | log.setLevel(DEBUG) 150 | else: 151 | log.setLevel(level) 152 | 153 | formatter = CmnFormatter('%(color_start)s%(levelname)s:%(name)s: %(message)s%(color_end)s', use_color=use_color) 154 | 155 | shandler = logging.StreamHandler() 156 | shandler.setFormatter(formatter) 157 | shandler.setLevel(level) 158 | log.addHandler(shandler) 159 | 160 | if filehandler: 161 | fhandler = logging.FileHandler(filehandler) 162 | fhandler.setFormatter(formatter) 163 | log.addHandler(fhandler) 164 | 165 | return log 166 | 167 | 168 | def _simple_test(): 169 | """Testing only 170 | Runs a simple sequence for manual comparison 171 | """ 172 | log = build_logger("main") 173 | log.debug("This message shouldn't print based on default level of info.") 174 | log.info("Message 1 prints.") 175 | log.exit_if_warnings_or_errors("Will not exit due to lack of previous errors") 176 | log.info("Message 2 prints.") 177 | log.error("Message 3 is an error and it will print.") 178 | log.exit_if_warnings_or_errors("Message 4: exiting due to errors. " 179 | "This is the last message to print.") 180 | log.info("Will not get to this point.") 181 | # Expected output 182 | # INFO:main:Message 1 prints. 183 | # INFO:main:Message 2 prints. 184 | # ERROR:main:Message 3 is an error and it will print. 185 | # CRITICAL:main:Message 4: exiting due to errors. This is the last message to print. 
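# A minimal usage sketch of the file-logging path (illustrative only; the
# logger name "compile" and the file name "flow.log" are hypothetical, not
# part of this module):
#
#   log = build_logger("compile", level=INFO, use_color=True, filehandler="flow.log")
#   log.summary("Compile starting")   # banner-decorated info line
#   log.warn("deprecated flag used")  # bumps log.warn_count
#   log.exit_if_warnings_or_errors("%d warnings seen", log.warn_count)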
186 | 187 | 188 | if __name__ == '__main__': 189 | _simple_test() 190 | -------------------------------------------------------------------------------- /lib/job_lib.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Definitions for Job-running and Job-related classes.""" 3 | 4 | ################################################################################ 5 | # standard lib imports 6 | import bisect 7 | import datetime 8 | import enum 9 | import os 10 | import signal 11 | import subprocess 12 | import threading 13 | import time 14 | 15 | 16 | @enum.unique 17 | class JobStatus(enum.Enum): 18 | NOT_STARTED = 0 19 | TO_BE_BYPASSED = 1 20 | PASSED = 10 21 | FAILED = 11 22 | SKIPPED = 12 # Due to upstream dependency failures, this job was not run 23 | BYPASSED = 13 # Skipped due to a norun directive, allows downstream jobs to execute assuming the outputs of this job have been previously created 24 | 25 | @property 26 | def completed(self): 27 | return self.value >= self.__class__.PASSED.value 28 | 29 | @property 30 | def successful(self): 31 | return self in [self.PASSED, self.BYPASSED] 32 | 33 | def __str__(self): 34 | return self.name 35 | 36 | def _error(self, new_state): 37 | raise ValueError("May not go from {} to {}".format(self, new_state)) 38 | 39 | def update(self, new_state): 40 | """Check for legal transitions. 41 | This doesn't actually change this instance, an assignment must be done with retval. 42 | Example: 43 | 44 | self._jobstatus = self._jobstatus.update(new_jobstatus) 45 | """ 46 | if new_state == self.NOT_STARTED: 47 | self._error(new_state) 48 | if self == new_state: 49 | pass # No actual transition, ignore 50 | elif self == self.NOT_STARTED: 51 | pass # Any transition is legal 52 | elif self == self.TO_BE_BYPASSED: 53 | if new_state == self.PASSED: 54 | return self.BYPASSED # In the case of a bypassed job, part of 55 | # the job may still be run with a 56 | # placeholder command. Downstream logic 57 | # may mark this as passed, but keep 58 | # bypassed for final formatting. 
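            # Illustrative walk-through of the bypass path (comments only, not
            # executed; traced directly from the branches below):
            #   status = JobStatus.TO_BE_BYPASSED
            #   status = status.update(JobStatus.PASSED)  # returns JobStatus.BYPASSED
            #   status = status.update(JobStatus.FAILED)  # a BYPASSED job may still fail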
59 |             if new_state != self.FAILED:
60 |                 self._error(new_state)
61 |         elif self == self.PASSED:
62 |             if new_state != self.FAILED:
63 |                 self._error(new_state)
64 |         elif self == self.FAILED:
65 |             self._error(new_state)
66 |         elif self == self.SKIPPED:
67 |             if new_state != self.FAILED:
68 |                 self._error(new_state)
69 |         elif self == self.BYPASSED:
70 |             if new_state != self.FAILED:
71 |                 self._error(new_state)
72 |         else:
73 |             raise ValueError("Unknown current state")
74 |         return new_state
75 |
76 |
77 | class Job():
78 |
79 |     _priority_cache = {}
80 |
81 |     def __init__(self, rcfg, name):
82 |         self.rcfg = rcfg  # Regression cfg object
83 |         self.name = name
84 |
85 |         # String set by derived class of the directory to run this job in
86 |         self.job_dir = None
87 |
88 |         self.job_lib = None
89 |
90 |         self.job_start_time = None
91 |         self.job_stop_time = None
92 |
93 |         self._jobstatus = JobStatus.NOT_STARTED
94 |
95 |         self.suppress_output = False
96 |         # FIXME need to implement a way to actually override this
97 |         # FIXME add multiplier for --gui
98 |         #self.timeout = 12.25 # Float hours
99 |         self.timeout = rcfg.options.timeout
100 |
101 |         self.priority = -3600  # Not sure that making this super negative is necessary if we log more stuff
102 |         self._get_priority()
103 |         self.log = self.rcfg.log
104 |         self.log.debug("%s priority=%d", self, self.priority)
105 |
106 |         # Implement both directions to make traversal of the graph easier
107 |         self._dependencies = []  # Things this job is dependent on
108 |         self._children = []  # Jobs that depend on this job
109 |
110 |     def __lt__(self, other):
111 |         return self.priority < other.priority
112 |
113 |     def _get_priority(self):
114 |         """This function is intended to assign a priority to this Job based on statistics of previous runs of this Job.
115 |
116 |         However, integration with the external simulation statistics aggregator didn't work well, so support was removed.
117 | """ 118 | return # Default zero priority 119 | 120 | @property 121 | def jobstatus(self): 122 | return self._jobstatus 123 | 124 | @jobstatus.setter 125 | def jobstatus(self, new_jobstatus): 126 | self._jobstatus = self._jobstatus.update(new_jobstatus) 127 | 128 | def add_dependency(self, dep): 129 | if not dep: 130 | self.log.error("%s added null dep", self) 131 | else: 132 | self._dependencies.append(dep) 133 | dep._children.append(self) 134 | dep.increase_priority(self.priority) 135 | 136 | def increase_priority(self, value): 137 | # Recurse up with new value 138 | self.priority += value 139 | for dep in self._dependencies: 140 | dep.increase_priority(value) 141 | 142 | def pre_run(self): 143 | self.log.info("Starting %s %s", self.__class__.__name__, self.name) 144 | self.job_start_time = datetime.datetime.now() 145 | 146 | if not os.path.exists(self.job_dir): 147 | self.log.debug("Creating job_dir: %s", self.job_dir) 148 | os.mkdir(self.job_dir) 149 | 150 | def post_run(self): 151 | self.job_stop_time = datetime.datetime.now() 152 | self.log.debug("post_run %s %s duration %s", self.__class__.__name__, self.name, self.duration_s) 153 | self.completed = True 154 | 155 | @property 156 | def duration_s(self): 157 | try: 158 | delta = self.job_stop_time - self.job_start_time 159 | except TypeError: 160 | return 0 161 | return delta.total_seconds() 162 | 163 | 164 | class JobRunner(): 165 | 166 | def __init__(self, job, manager): 167 | self.job = job 168 | self.job.job_lib = self 169 | 170 | self.manager = manager 171 | 172 | self.done = False 173 | self.log = job.log 174 | 175 | def check_for_done(self): 176 | raise NotImplementedError 177 | 178 | @property 179 | def returncode(self): 180 | raise NotImplementedError 181 | 182 | def print_stderr_if_failed(self): 183 | raise NotImplementedError 184 | 185 | 186 | class SubprocessJobRunner(JobRunner): 187 | 188 | def __init__(self, job, manager): 189 | super(SubprocessJobRunner, self).__init__(job, manager) 190 | kwargs = {'shell': True, 'preexec_fn': os.setsid} 191 | 192 | if self.job.suppress_output or self.job.rcfg.options.no_stdout: 193 | self.stdout_fp = open(os.path.join(self.job.job_dir, "stdout.log"), 'w') 194 | self.stderr_fp = open(os.path.join(self.job.job_dir, "stderr.log"), 'w') 195 | kwargs['stdout'] = self.stdout_fp 196 | kwargs['stderr'] = self.stderr_fp 197 | self._start_time = datetime.datetime.now() 198 | self._p = subprocess.Popen(self.job.main_cmdline, **kwargs) 199 | self.log = job.log 200 | 201 | def check_for_done(self): 202 | if self.done: 203 | return self.done 204 | try: 205 | result = self._check_for_done() 206 | except Exception as exc: 207 | self.log.error("Job failed %s:\n%s", self.job, exc) 208 | result = True 209 | if result: 210 | self.done = result 211 | return result 212 | 213 | def _check_for_done(self): 214 | if self._p.poll() is not None: 215 | if self.job.suppress_output or self.job.rcfg.options.no_stdout: 216 | self.stdout_fp.close() 217 | self.stderr_fp.close() 218 | return True 219 | delta = datetime.datetime.now() - self._start_time 220 | if self.job.timeout > 0 and delta > datetime.timedelta(hours=self.job.timeout): 221 | self.log.error("%s exceeded timeout value of %s (job will be killed)", self.job, self.job.timeout) 222 | os.killpg(os.getpgid(self._p.pid), signal.SIGTERM) 223 | with open(os.path.join(self.job.job_dir, "stderr.log"), 'a') as filep: 224 | filep.write("%%E- %s exceeded timeout value of %s (job will be killed)" % (self.job, self.job.timeout)) 225 | with 
open(os.path.join(self.job.job_dir, "stdout.log"), 'a') as filep:
226 |                 filep.write("%%E- %s exceeded timeout value of %s (job will be killed)" % (self.job, self.job.timeout))
227 |             return True
228 |         return False
229 |
230 |     @property
231 |     def returncode(self):
232 |         return self._p.returncode
233 |
234 |     def kill(self):
235 |         os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
236 |         # None of the following variants seemed to work (due to shell=True ?)
237 |         # process = psutil.Process(self._p.pid)
238 |         # for proc in process.children(recursive=True):
239 |         #     proc.kill()
240 |         # process.kill()
241 |
242 |         # self._p.terminate()
243 |
244 |         # self._p.kill()
245 |
246 |
247 | class JobManager():
248 |     """Manages multiple concurrent jobs"""
249 |
250 |     def __init__(self, options, log):
251 |         self.log = log
252 |         self.max_parallel = options['parallel_max']
253 |         self.sleep_interval = options['parallel_interval']
254 |         self.idle_print_interval = datetime.timedelta(seconds=options['idle_print_seconds'])
255 |
256 |         self._quit_count = options['quit_count']
257 |         self._error_count = 0
258 |         self._done_grace_exit = False
259 |         self.exited_prematurely = False
260 |
261 |         # Jobs must transition from todo->ready->active->done
262 |
263 |         # These are jobs ready to be run, but may not have their dependencies met yet
264 |         # This list is maintained in sorted priority order
265 |         self._todo = []
266 |
267 |         # Jobs ready to launch (all dependencies met)
268 |         # This list is maintained in sorted priority order
269 |         self._ready = []
270 |
271 |         # Jobs launched but not yet complete
272 |         self._active = []
273 |
274 |         # Completed jobs
275 |         self._done = []
276 |
277 |         self._skipped = []
278 |
279 |         self._run_jobs_thread = threading.Thread(name="_run_jobs", target=self._run_jobs)
280 |         self._run_jobs_thread.setDaemon(True)
281 |         self._run_jobs_thread_active = True
282 |         self._run_jobs_thread.start()
283 |
284 |         self.job_lib_type = SubprocessJobRunner
285 |
286 |         self._last_done_or_idle_print = datetime.datetime.now()
287 |
288 |     def _print_state(self, log_fn):
289 |         job_queues = ["_todo", "_ready", "_active", "_done", "_skipped"]
290 |         for jq in job_queues:
291 |             log_fn("%s: %s", jq, getattr(self, jq))
292 |
293 |     def _run_jobs(self):
294 |         while self._run_jobs_thread_active:
295 |             self._move_todo_to_ready()
296 |             self._move_ready_to_active()
297 |             while len(self._active):
298 |                 for i, job in enumerate(self._active):
299 |                     if job.job_lib.check_for_done():
300 |                         self.log.debug("%s body done", job)
301 |                         try:
302 |                             job.post_run()
303 |                         except Exception as exc:
304 |                             self.log.error("%s post_run() failed:\n%s", job, exc)
305 |                         if not job.jobstatus.successful:
306 |                             self._error_count += 1
307 |                             if self._error_count >= self._quit_count:
308 |                                 self._graceful_exit()
309 |                             self._move_children_to_skipped(job)
310 |                         self._active.pop(i)
311 |                         self._last_done_or_idle_print = datetime.datetime.now()
312 |                         self._done.append(job)
313 |                 # Ideally this would be before post_run, but pass_fail status may be set there
314 |                 self._move_todo_to_ready()
315 |                 self._move_ready_to_active()
316 |                 time_since_last_done_or_idle_print = datetime.datetime.now() - self._last_done_or_idle_print
317 |                 if time_since_last_done_or_idle_print > self.idle_print_interval:
318 |                     self._last_done_or_idle_print = datetime.datetime.now()
319 |                     self._print_state(self.log.info)
320 |
321 |                 time.sleep(self.sleep_interval)
322 |             if not len(self._active):
323 |                 time.sleep(self.sleep_interval)
324 |
325 |     def _move_children_to_skipped(self, job):
326 |         for child in job._children:
327 |             self.log.info("Skipping job %s due to dependency (%s) failure", child, job)
328 |             try:
329 |                 self._todo.remove(child)
330 |                 child.jobstatus = JobStatus.SKIPPED
331 |             except ValueError:
332 |                 # Initially, this was a nice sanity check, but it doesn't always hold true
333 |                 # See azure #924
334 |                 # if child not in self._skipped:
335 |                 #     raise ValueError("Couldn't find child job to mark as skipped")
336 |                 continue
337 |             self._skipped.append(child)
338 |             self._move_children_to_skipped(child)
339 |
340 |     def _move_todo_to_ready(self):
341 |         self._print_state(self.log.debug)
342 |         jobs_that_advanced_state = []
343 |         for i, job in enumerate(self._todo):
344 |             if len(job._dependencies) == 0:
345 |                 # There are no dependencies
346 |                 bisect.insort_right(self._ready, job)
347 |                 jobs_that_advanced_state.append(i)
348 |             else:
349 |                 all_dependencies_are_done = all([dep.jobstatus.completed for dep in job._dependencies])
350 |                 if not all_dependencies_are_done:
351 |                     continue
352 |                 all_dependencies_passed = all([dep.jobstatus.successful for dep in job._dependencies])
353 |                 if all_dependencies_passed:
354 |                     bisect.insort_right(self._ready, job)
355 |                     jobs_that_advanced_state.append(i)
356 |                 else:
357 |                     self.log.error("Skipping job %s due to dependency failure", job)
358 |                     jobs_that_advanced_state.append(i)
359 |                     self._skipped.append(job)
360 |                     job.jobstatus = JobStatus.SKIPPED
361 |
362 |         # Can't iterate and remove in list at the same time easily
363 |         for i in reversed(jobs_that_advanced_state):
364 |             self._todo.pop(i)
365 |
366 |     def _move_ready_to_active(self):
367 |         self._print_state(self.log.debug)
368 |
369 |         available_to_run = self.max_parallel - len(self._active)
370 |
371 |         jobs_that_advanced_state = []
372 |         for i in range(available_to_run):
373 |             try:
374 |                 job = self._ready[i]
375 |             except IndexError:
376 |                 # We have more jobs available than todos
377 |                 continue  # Need to finish loop or final cleanup won't happen
378 |             job.pre_run()
379 |             self.log.debug("%s priority: %d", job, job.priority)
380 |             self.job_lib_type(job, self)
381 |             jobs_that_advanced_state.append(i)
382 |             self._active.append(job)
383 |
384 |         for i in reversed(jobs_that_advanced_state):
385 |             self._ready.pop(i)
386 |
387 |     def _graceful_exit(self):
388 |         if self._done_grace_exit:
389 |             return
390 |         self.exited_prematurely = True
391 |         self._done_grace_exit = True
392 |         self.log.warn("Exceeded quit count. Graceful exit.")
393 |         self._skipped.extend(self._todo)
394 |         self._todo = []
395 |         self._skipped.extend(self._ready)
396 |         self._ready = []
397 |
398 |     def add_job(self, job):
399 |         if not isinstance(job, Job):
400 |             raise ValueError("Tried to add a non-Job job {} of type {}".format(job, type(job)))
401 |         if not self._done_grace_exit:
402 |             bisect.insort_right(self._todo, job)
403 |         else:
404 |             self._skipped.append(job)
405 |
406 |     def wait(self):
407 |         """Blocks until no jobs are left."""
408 |         self.log.info("Waiting until all jobs are completed.")
409 |         while len(self._todo) or len(self._ready) or len(self._active):
410 |             self.log.debug("still waiting")
411 |             time.sleep(10)
412 |
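    # Illustrative driver flow for this class (a sketch; the option values
    # below are hypothetical and would normally come from the simmer CLI):
    #
    #   jm = JobManager({'parallel_max': 4, 'parallel_interval': 1,
    #                    'idle_print_seconds': 60, 'quit_count': 10}, log)
    #   for job in jobs:
    #       jm.add_job(job)  # enters _todo in priority order
    #   jm.wait()            # block until _todo/_ready/_active drain
    #   jm.stop()            # halt the scheduler thread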
413 |     def stop(self):
414 |         """Stop the job runner thread (CPU intensive). This is really more of a pause than a full stop & exit."""
415 |         self._run_jobs_thread_active = False
416 |         self.exited_prematurely = True
417 |
418 |     def kill(self):
419 |         self.stop()
420 |         for job in self._active:
421 |             job.job_lib.kill()
422 |
423 |
424 | class BazelTBJob(Job):
425 |     """Runs bazel to build up a tb compile."""
426 |
427 |     def __init__(self, rcfg, target, vcomper):
428 |         self.bazel_target = target
429 |         super(BazelTBJob, self).__init__(rcfg, self)
430 |         self.vcomper = vcomper
431 |         if vcomper:
432 |             self.vcomper.add_dependency(self)
433 |
434 |         self.job_dir = self.vcomper.job_dir  # Don't actually need a dir, but jobrunner/manager want it defined
435 |         if self.rcfg.options.no_compile:
436 |             self.main_cmdline = "echo \"Bypassing {} due to --no-compile\"".format(target)
437 |         else:
438 |             self.main_cmdline = "bazel run {}".format(target)
439 |
440 |         self.suppress_output = True
441 |         if self.rcfg.options.tool_debug:
442 |             self.suppress_output = False
443 |
444 |     def post_run(self):
445 |         super(BazelTBJob, self).post_run()
446 |         if self.job_lib.returncode == 0:
447 |             self.jobstatus = JobStatus.PASSED
448 |         else:
449 |             self.jobstatus = JobStatus.FAILED
450 |             self.log.error("%s failed. Log in %s", self, os.path.join(self.job_dir, "stderr.log"))
451 |
452 |     def __repr__(self):
453 |         return 'Bazel("{}")'.format(self.bazel_target)
454 |
455 |
456 | class BazelTestCfgJob(Job):
457 |     """Bazel build for a testcfg only needs to run once per test cfg, not per iteration, so it is split out into its own job."""
458 |
459 |     def __init__(self, rcfg, target, vcomper):
460 |         self.bazel_target = target
461 |         super(BazelTestCfgJob, self).__init__(rcfg, self)
462 |         self.vcomper = vcomper
463 |         if vcomper:
464 |             self.add_dependency(vcomper)
465 |
466 |         self.job_dir = self.vcomper.job_dir  # Don't actually need a dir, but jobrunner/manager want it defined
467 |         self.main_cmdline = "bazel build {}".format(self.bazel_target)
468 |
469 |         self.suppress_output = True
470 |         if self.rcfg.options.tool_debug:
471 |             self.suppress_output = False
472 |
473 |     def post_run(self):
474 |         super(BazelTestCfgJob, self).post_run()
475 |         if self.job_lib.returncode == 0:
476 |             self.jobstatus = JobStatus.PASSED
477 |         else:
478 |             self.jobstatus = JobStatus.FAILED
479 |             self.log.error("%s failed. Log in %s", self, os.path.join(self.job_dir, "stderr.log"))
480 |
481 |     def dynamic_args(self):
482 |         """Additional arguments specific to each simulation"""
483 |         path, target = self.bazel_target.split(":")
484 |         path_to_dynamic_args_files = os.path.join(self.rcfg.proj_dir, "bazel-bin", path[2:],
485 |                                                   "{}_dynamic_args.py".format(target))
486 |         with open(path_to_dynamic_args_files, 'r') as filep:
487 |             content = filep.read()
488 |         dynamic_args = eval(content)
489 |         return dynamic_args
490 |
491 |     def __repr__(self):
492 |         return 'Bazel("{}")'.format(self.bazel_target)
493 |
494 |
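# Illustrative shape of a generated {target}_dynamic_args.py file that
# dynamic_args() above eval()s. The exact schema is produced by the dv.bzl
# rules; this particular content is hypothetical:
#
#   [
#       "+UVM_TESTNAME=my_test",
#       "+UVM_VERBOSITY=UVM_MEDIUM",
#   ]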
501 | """ 502 | 503 | def __init__(self, rcfg): 504 | super(BazelShutdownJob, self).__init__(rcfg, "bazel shutdown") 505 | 506 | self.job_dir = rcfg.proj_dir 507 | # self.main_cmdline = "bazel shutdown" 508 | self.main_cmdline = "echo \"Skipping bazel shutdown\"" 509 | 510 | self.suppress_output = True 511 | if self.rcfg.options.tool_debug: 512 | self.suppress_output = False 513 | 514 | def post_run(self): 515 | super(BazelShutdownJob, self).post_run() 516 | if self.job_lib.returncode == 0: 517 | self.jobstatus = JobStatus.PASSED 518 | else: 519 | self.jobstatus = JobStatus.FAILED 520 | self.log.error("%s failed. Log in %s", self, os.path.join(self.job_dir, "stderr.log")) 521 | 522 | def __repr__(self): 523 | return 'Bazel Shutdown' 524 | -------------------------------------------------------------------------------- /lib/parser_actions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | 5 | 6 | class TestAction(argparse.Action): 7 | 8 | class TestArg(): 9 | 10 | def __init__(self, btiglob): 11 | self.btiglob = btiglob 12 | self.tag = set() 13 | self.ntag = set() 14 | 15 | def __repr__(self): 16 | return "TestArg(btiglob='{}', tags={}, ntags={})".format(self.btiglob, self.tag, self.ntag) 17 | 18 | def __call__(self, parser, namespace, values, option_string=None): 19 | ta = self.__class__.TestArg(values) 20 | getattr(namespace, self.dest).append(ta) 21 | 22 | 23 | class TagAction(argparse.Action): 24 | 25 | def __call__(self, parser, namespace, values, option_string=None): 26 | try: 27 | last_test = namespace.tests[-1] 28 | except IndexError: 29 | return # The return is actually more graceful than the explicit value error 30 | # It relies upon argparse to catch the missing option and throw a better formatted error 31 | # raise ValueError("Attempted to use a test tag filter without any tests specified. 
43 | class XpropAction(argparse.Action):
44 |     legal_xprop_options = {
45 |         'C': 'C - Compute as ternary (CAT)',
46 |         'F': 'F - Forward only X (FOX)',
47 |         'D': 'D - Disable xprop',
48 |     }
49 |
50 |     def __call__(self, parser, args, values, option_string=None):
51 |         if values in ['C', 'F']:
52 |             setattr(args, self.dest, values)
53 |         elif values == 'D':
54 |             setattr(args, self.dest, None)
55 |         else:
56 |             parser.error("Illegal xprop value {}, only the following are allowed:\n  {}".format(
57 |                 values, "\n  ".join(["{} : {}".format(ii, jj) for ii, jj in self.legal_xprop_options.items()])))
58 |
59 |
60 | class CovAction(argparse.Action):
61 |     legal_coverage_options = {
62 |         'B': 'Block - For enabling block coverage',
63 |         'E': 'Expr - For enabling expression coverage',
64 |         'F': 'Fsm - For enabling fsm coverage',
65 |         'T': 'Toggle - For enabling toggle coverage',
66 |         'U': 'fUnctional - For enabling functional coverage',
67 |         'A': 'All - For enabling all supported coverage types'
68 |     }
69 |
70 |     @classmethod
71 |     def format_options(cls, indent=2):
72 |         return f"\n{' '*indent}".join(["{} : {}".format(ii, jj) for ii, jj in cls.legal_coverage_options.items()])
73 |
74 |     def __call__(self, parser, args, values, option_string=None):
75 |         cov_options = values.split(':')
76 |         for cov_option in cov_options:
77 |             if cov_option not in self.legal_coverage_options:
78 |                 parser.error(
79 |                     "Illegal coverage value {}\nRequires a colon separated list of the following values:\n  {}".format(
80 |                         cov_option, self.format_options()))
81 |         setattr(args, self.dest, values)
82 | -------------------------------------------------------------------------------- /lib/regression.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | ################################################################################
4 | # standard lib imports
5 | import fnmatch
6 | import os
7 | import re
8 | import shlex
9 | import subprocess
10 | import sys
11 | from tempfile import TemporaryFile
12 |
13 | ################################################################################
14 | # rules_verilog lib imports
15 | from lib import rv_utils
16 |
17 | # I'd rather create a "plain" message in the logger
18 | # that doesn't format, but it's more work than it's worth
19 | LOGGER_INDENT = 8
20 | BENCHES_REL_DIR = os.environ.get('BENCHES_REL_DIR', 'benches')
21 |
22 |
23 | class RegressionConfig():
24 |
25 |     def __init__(self, options, log):
26 |         self.options = options
27 |         self.log = log
28 |
29 |         self.max_bench_name_length = 20
30 |         self.max_test_name_length = 20
31 |
32 |         self.suppress_output = False
33 |
34 |         self.proj_dir = self.options.proj_dir
35 |         self.regression_dir = rv_utils.calc_simresults_location(self.proj_dir)
36 |         if not os.path.exists(self.regression_dir):
37 |             os.mkdir(self.regression_dir)
38 |
39 |         self.invocation_dir = os.getcwd()
40 |
41 |         self.test_discovery()
42 |
43 |         total_tests = sum([iterations for vcomp in self.all_vcomp.values() for test, iterations in vcomp.items()])
44 |         if total_tests == 0:
45 |             self.log.critical("Test globbing resulted in no tests to run")
46 |
47 |         self.tidy = True
48 |         if total_tests == 1:
49 |             self.tidy = False
50 |         if self.options.waves is not
None: 51 | self.tidy = False 52 | if self.options.nt: 53 | self.tidy = False 54 | if self.tidy: 55 | self.log.info( 56 | "tidy=%s passing tests will automatically be cleaned up. Use --nt to prevent automatic cleanup.", 57 | self.tidy) 58 | 59 | self.deferred_messages = [] 60 | 61 | def table_format(self, b, t, c, indent=' ' * LOGGER_INDENT): 62 | return "{}{:{}s} {:{}s} {:{}s}".format(indent, b, self.max_bench_name_length, t, self.max_test_name_length, c, 63 | 6) 64 | 65 | def table_format_summary_line(self, bench, test, passed, skipped, failed, indent=' ' * LOGGER_INDENT): 66 | return f"{indent}{bench:{self.max_bench_name_length}s} {test:{self.max_test_name_length}s} {passed:{6}s} {skipped:{6}s} {failed:{6}s}" 67 | 68 | def format_test_name(self, b, t, i): 69 | return "{:{}s} {:{}s} {:-4d}".format(b, self.max_bench_name_length, t, self.max_test_name_length, i) 70 | 71 | def test_discovery(self): 72 | """Look for all tests in the checkout and filter down to what was specified on the CLI""" 73 | self.log.summary("Starting test discovery") 74 | dtp = rv_utils.DatetimePrinter(self.log) 75 | 76 | cmd = "bazel query \"kind(dv_tb, //{}/...)\"".format(BENCHES_REL_DIR) 77 | self.log.debug(" > %s", cmd) 78 | 79 | dtp.reset() 80 | with TemporaryFile() as stdout_fp, TemporaryFile() as stderr_fp: 81 | p = subprocess.Popen(cmd, stdout=stdout_fp, stderr=stderr_fp, shell=True) 82 | p.wait() 83 | stdout_fp.seek(0) 84 | stderr_fp.seek(0) 85 | stdout = stdout_fp.read() 86 | stderr = stderr_fp.read() 87 | if p.returncode: 88 | self.log.critical("bazel bench discovery failed: %s", stderr.decode('ascii')) 89 | 90 | dtp.stop_and_print() 91 | all_vcomp = stdout.decode('ascii').split('\n') 92 | all_vcomp = dict([(av, {}) for av in all_vcomp if av]) 93 | 94 | tests_to_tags = {} 95 | vcomp_to_query_results = {} 96 | 97 | for vcomp, tests in all_vcomp.items(): 98 | vcomp_path, _ = vcomp.split(':') 99 | test_wildcard = os.path.join(vcomp_path, "tests", "...") 100 | if self.options.allow_no_run: 101 | cmd = 'bazel cquery "attr(abstract, 0, kind(dv_test_cfg, {test_wildcard} intersect allpaths({test_wildcard}, {vcomp})))"'.format( 102 | test_wildcard=test_wildcard, vcomp=vcomp) 103 | else: 104 | cmd = 'bazel cquery "attr(no_run, 0, attr(abstract, 0, kind(dv_test_cfg, {test_wildcard} intersect allpaths({test_wildcard}, {vcomp}))))"'.format( 105 | test_wildcard=test_wildcard, vcomp=vcomp) 106 | 107 | self.log.debug(" > %s", cmd) 108 | 109 | dtp.reset() 110 | 111 | with TemporaryFile() as stdout_fp, TemporaryFile() as stderr_fp: 112 | cmd = shlex.split(cmd) 113 | p = subprocess.Popen(cmd, stdout=stdout_fp, stderr=stderr_fp, shell=False, bufsize=-1) 114 | p.wait() 115 | stdout_fp.seek(0) 116 | stderr_fp.seek(0) 117 | stdout = stdout_fp.read() 118 | stderr = stderr_fp.read() 119 | if p.returncode: 120 | self.log.critical("bazel test discovery failed:\n%s", stderr.decode('ascii')) 121 | 122 | dtp.stop_and_print() 123 | query_results = stdout.decode('ascii').replace('\n', ' ') 124 | query_results = re.sub("\([a-z0-9]{7,64}\) *", "", query_results) 125 | vcomp_to_query_results[vcomp] = query_results 126 | 127 | for vcomp, tests in all_vcomp.items(): 128 | query_results = vcomp_to_query_results[vcomp] 129 | cmd = "bazel build {} --aspects @rules_verilog//verilog/private:dv.bzl%verilog_dv_test_cfg_info_aspect".format( 130 | query_results) 131 | self.log.debug(" > %s", cmd) 132 | 133 | dtp.reset() 134 | with TemporaryFile() as stdout_fp, TemporaryFile() as stderr_fp: 135 | cmd = shlex.split(cmd) 136 | p = subprocess.Popen(cmd, 
stdout=stdout_fp, stderr=stderr_fp, shell=False, bufsize=-1)
137 |                 p.wait()
138 |                 stdout_fp.seek(0)
139 |                 stderr_fp.seek(0)
140 |                 stdout = stdout_fp.read()
141 |                 stderr = stderr_fp.read()
142 |                 if p.returncode:
143 |                     self.log.critical("bazel test discovery failed:\n%s", stderr.decode('ascii'))
144 |
145 |             dtp.stop_and_print()
146 |             text = stdout.decode('ascii').split('\n') + stderr.decode('ascii').split('\n')
147 |
148 |             ttv = [
149 |                 re.search("verilog_dv_test_cfg_info\((?P<test>.*), (?P<vcomp>.*), \[(?P<tags>.*)\]\)", line)
150 |                 for line in text
151 |             ]
152 |             ttv = [match for match in ttv if match]
153 |
154 |             matching_tests = [(mt.group('test'), eval("[%s]" % mt.group('tags'))) for mt in ttv
155 |                               if mt.group('vcomp') == vcomp]
156 |             tests_to_tags.update(matching_tests)
157 |             tests.update(dict([(t[0], 0) for t in matching_tests]))
158 |
159 |         table_output = []
160 |         table_output.append(self.table_format("bench", "test", "count"))
161 |         table_output.append(self.table_format("-----", "----", "-----"))
162 |         for vcomp, tests in all_vcomp.items():
163 |             bench = vcomp.split(':')[1]
164 |             for i, (test_target, count) in enumerate(tests.items()):
165 |                 test = test_target.split(':')[1]
166 |                 if i == 0:
167 |                     table_output.append(self.table_format(bench, test, str(count)))
168 |                 else:
169 |                     table_output.append(self.table_format('', test, str(count)))
170 |
171 |         self.log.debug("Tests available:\n%s", "\n".join(table_output))
172 |
173 |         # bti is bench-test-iteration
174 |         for ta in self.options.tests:
175 |             try:
176 |                 btglob, iterations = ta.btiglob.split("@")
177 |                 try:
178 |                     iterations = int(iterations)
179 |                 except ValueError:
180 |                     self.log.critical("iterations (value after @) was not an integer: '%s'", ta.btiglob)
181 |             except ValueError:
182 |                 btglob = ta.btiglob
183 |                 iterations = 1
184 |
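            # Illustrative btiglob forms accepted by the parsing above and the
            # bench:test split below (bench/test names are hypothetical):
            #   digital_top:smoke_test@5  -> bench glob, test glob, 5 iterations
            #   digital_top:smoke*        -> defaults to 1 iteration
            #   smoke*                    -> bench inferred from the current benches/ dir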
185 |             try:
186 |                 bglob, tglob = btglob.split(":")
187 |             except ValueError:
188 |                 # If inside a testbench directory, it's only necessary to provide a single glob
189 |                 pwd = os.getcwd()
190 |                 benches_dir = os.path.join(self.proj_dir, BENCHES_REL_DIR)
191 |                 if not (benches_dir in pwd and len(benches_dir) < len(pwd)):
192 |                     self.log.critical("Not in a benches/ directory. Must provide bench:test style glob.")
193 |                 bglob = pwd[len(benches_dir) + 1:]
194 |                 tglob = btglob
195 |
196 |             query = "*:{}".format(bglob)  # Matching against a bazel label
197 |             vcomp_match = fnmatch.filter(all_vcomp.keys(), query)
198 |
199 |             self.log.debug("Looking for tests matching %s", ta)
200 |
201 |             for vcomp in vcomp_match:
202 |                 tests = all_vcomp[vcomp]
203 |                 query = "*:{}".format(tglob)  # Matching against a bazel label
204 |                 test_match = fnmatch.filter(tests, query)
205 |                 for test in test_match:
206 |                     # Filter tests against tags
207 |                     test_tags = set(tests_to_tags[test])
208 |                     if ta.tag and not ((ta.tag & test_tags) == ta.tag):
209 |                         self.log.debug("  Skipping %s because it did not match --tag=%s", test, ta.tag)
210 |                         continue
211 |                     if ta.ntag and (ta.ntag & test_tags):
212 |                         self.log.debug("  Skipping %s because it matched --ntags=%s", test, ta.ntag)
213 |                         continue
214 |                     if self.options.global_tag and not (
215 |                             (self.options.global_tag & test_tags) == self.options.global_tag):
216 |                         self.log.debug("  Skipping %s because it did not match --global-tag=%s", test,
217 |                                        self.options.global_tag)
218 |                         continue
219 |                     if self.options.global_ntag and (self.options.global_ntag & test_tags):
220 |                         self.log.debug("  Skipping %s because it matched --global-ntags=%s", test,
221 |                                        self.options.global_ntag)
222 |                         continue
223 |                     self.log.debug("  %s met tag requirements", test)
224 |                     try:
225 |                         new_max = max(tests[test], iterations)
226 |                     except KeyError:
227 |                         new_max = iterations
228 |                     tests[test] = new_max
229 |
230 |         # Now prune down all the tests and benches that aren't active
231 |         for vcomp, tests in all_vcomp.items():
232 |             all_vcomp[vcomp] = dict([(t, i) for t, i in tests.items() if i])
233 |         all_vcomp = dict([(vcomp, tests) for vcomp, tests in all_vcomp.items() if len(tests)])
234 |
235 |         table_output = []
236 |         table_output.append(self.table_format("bench", "test", "count"))
237 |         table_output.append(self.table_format("-----", "----", "-----"))
238 |         vcomps = list(all_vcomp.keys())
239 |         vcomps.sort()
240 |         for vcomp in vcomps:
241 |             bench = vcomp.split(':')[1]
242 |             tests = all_vcomp[vcomp]
243 |             test_targets = list(tests.keys())
244 |             test_targets.sort()
245 |             for i, test_target in enumerate(test_targets):
246 |                 test = test_target.split(':')[1]
247 |                 count = tests[test_target]
248 |                 if i == 0:
249 |                     table_output.append(self.table_format(bench, test, str(count)))
250 |                 else:
251 |                     table_output.append(self.table_format('', test, str(count)))
252 |
253 |         self.log.info("Tests to run:\n%s", "\n".join(table_output))
254 |
255 |         self.all_vcomp = all_vcomp
256 |
257 |         if self.options.discovery_only:
258 |             self.log.info("Ran with --discovery-only option. Exiting.")
Exiting.") 259 | sys.exit(0) 260 | -------------------------------------------------------------------------------- /lib/rv_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Utility class definitions.""" 3 | 4 | import datetime 5 | import getpass 6 | import os 7 | import re 8 | 9 | # I'd rather create a "plain" message in the logger 10 | # that doesn't format, but more work than its worth 11 | LOGGER_INDENT = 8 12 | SIMRESULTS = os.environ.get('SIMRESULTS', '') 13 | 14 | 15 | class DatetimePrinter(): 16 | 17 | def __init__(self, log): 18 | self.ts = datetime.datetime.now() 19 | self.log = log 20 | 21 | def reset(self): 22 | self.ts = datetime.datetime.now() 23 | 24 | def stop_and_print(self): 25 | stop = datetime.datetime.now() 26 | delta = stop - self.ts 27 | self.log.debug("Last time check: %d", delta.total_seconds()) 28 | 29 | 30 | class IterationCfg(): 31 | 32 | def __init__(self, target): 33 | self.target = target 34 | self.spawn_count = 1 35 | self.jobs = [] 36 | 37 | def inc(self, job): 38 | self.spawn_count += 1 39 | self.jobs.append(job) 40 | 41 | def __lt__(self, other): 42 | return self.jobs[0].name < other.jobs[0].name 43 | 44 | 45 | def print_summary(rcfg, vcomp_jobs, icfgs, jm): 46 | table_data = [("bench", "test", "passed", "skipped", "failed", "logs")] 47 | separator = [""] * len(table_data[0]) 48 | table_data.append(separator) 49 | 50 | total_passed = 0 51 | total_skipped = 0 52 | total_failed = 0 53 | 54 | last = len(rcfg.all_vcomp) - 1 55 | for i, (vcomp_name, (icfgs, test_list)) in enumerate(rcfg.all_vcomp.items()): 56 | vcomp = vcomp_jobs[vcomp_name] 57 | table_data.append( 58 | (vcomp.name, "vcomp", '1' if vcomp.jobstatus.successful else '', 59 | '1' if vcomp.jobstatus == vcomp.jobstatus.SKIPPED else '', '1' if not vcomp.jobstatus.successful else '', 60 | '' if vcomp.jobstatus.successful else str(vcomp.log_path))) 61 | if vcomp.jobstatus == vcomp.jobstatus.PASSED: 62 | total_passed += 1 63 | elif vcomp.jobstatus == vcomp.jobstatus.FAILED: 64 | total_failed += 1 65 | else: 66 | total_skipped += 1 67 | 68 | if rcfg.options.no_run: 69 | continue 70 | 71 | icfgs.sort() 72 | for icfg in icfgs: 73 | if not icfg.jobs[0].vcomper is vcomp: 74 | continue 75 | passed = [j for j in icfg.jobs if j.jobstatus.completed and j.jobstatus.successful] 76 | failed = [j for j in icfg.jobs if j.jobstatus == j.jobstatus.FAILED] 77 | skipped = [j for j in icfg.jobs if j.jobstatus not in [j.jobstatus.FAILED, j.jobstatus.PASSED]] 78 | 79 | total_passed += len(passed) 80 | total_failed += len(failed) 81 | total_skipped += len(skipped) 82 | 83 | try: 84 | assert len(passed) + len(failed) + len(skipped) == len(icfg.jobs), print( 85 | len(passed), len(failed), len(skipped), len(icfg.jobs)) 86 | except AssertionError as exc: 87 | if not jm.exited_prematurely: 88 | raise exc 89 | 90 | table_data.append( 91 | ("", icfg.jobs[0].name, str(len(passed)) if passed else "", str(len(skipped)) if skipped else "", 92 | str(len(failed)) if failed else "", "")) 93 | for j in failed: 94 | table_data.append(("", "", "", "", "", j.log_path if j.log_path else '')) 95 | if i != last: 96 | table_data.append(separator) 97 | 98 | # Check that entries are consistent 99 | assert all(len(i) == len(table_data[0]) for i in table_data) 100 | columns = list(zip(*table_data)) 101 | column_widths = [max([len(cell) for cell in col]) for col in columns] 102 | formatter = " " * LOGGER_INDENT + " ".join( 103 | ["{{:{}{}s}}".format('>' if i in [2, 3, 4] else 
117 | def calc_simresults_location(checkout_path):
118 |     """Calculate the path to put regression results."""
119 |     username = getpass.getuser()
120 |
121 |     # FIXME, we may want to detect who owns the checkout to allow
122 |     # for rerunning in someone else's area?  # pylint: disable=fixme
123 |     sim_results_home = os.path.join(SIMRESULTS, username)
124 |     if not os.path.exists(sim_results_home):
125 |         os.mkdir(sim_results_home)
126 |
127 |     # If username is in the checkout_path try to reduce the name
128 |     # Assume username is somewhere in the path
129 |     try:
130 |         checkout_path = re.search("{}/(.*)".format(username), checkout_path).group(1)
131 |     except AttributeError:
132 |         pass
133 |     checkout_path = checkout_path.replace('/', '_')
134 |     # Adding the datetime into the regression directory will force a recompile.
135 |     # Ideally, the vcomp directory will need to have the same name
136 |     # strdate = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time()))
137 |     # regression_directory = '{}__{}'.format(checkout_path, strdate)
138 |     regression_directory = checkout_path
139 |     regression_directory = os.path.join(sim_results_home, regression_directory)
140 |     return regression_directory
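# Worked example for calc_simresults_location (paths are hypothetical):
#   SIMRESULTS=/sim/results, user 'alice', checkout '/home/alice/proj/digital'
#   -> sim_results_home = '/sim/results/alice'
#   -> checkout_path reduced to 'proj/digital', flattened to 'proj_digital'
#   -> returns '/sim/results/alice/proj_digital'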
141 | -------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | bs4==0.0.1
2 | cookiecutter==2.1.1
3 | -------------------------------------------------------------------------------- /simulator.bzl: --------------------------------------------------------------------------------
1 | # -*- mode: python -*-
2 | """Provides access to the DPI headers in an xcelium installation.
3 |
4 | This allows bazel to precompile DPI code to be able to pass shared
5 | objects to the simulator.
6 |
7 | Example usage in another WORKSPACE file:
8 |
9 |   load("@rules_verilog//:simulator.bzl", "xcelium_setup")
10 |   xcelium_setup(name="xcelium")
11 | """
12 |
13 | XRUN_BUILD = """
14 | filegroup(
15 |     name = "dpi_headers",
16 |     srcs = ["svdpi.h", "svdpi_compatibility.h"],
17 |     visibility = ["//visibility:public"],
18 | )
19 | """
20 |
21 | VCS_BUILD = """
22 | filegroup(
23 |     name = "dpi_headers",
24 |     srcs = ["svdpi.h", "svdpi_src.h"],
25 |     visibility = ["//visibility:public"],
26 | )
27 | """
28 |
29 | XRUN_DPI_HEADERS = ["svdpi.h", "svdpi_compatibility.h"]
30 | VCS_DPI_HEADERS = ["svdpi.h", "svdpi_src.h"]
31 |
32 | VARS = ["PROJ_DIR", "MODULEPATH"]
33 |
34 | def _xcelium_setup_impl(repository_ctx):
35 |     if repository_ctx.attr.name != "xcelium":
36 |         fail("The xcelium_setup rule must be named 'xcelium'!")
37 |     result = repository_ctx.execute(
38 |         ["runmod", "xrun", "--", "printenv", "XCELIUMHOME"],
39 |         environment = repository_ctx.os.environ,
40 |         # working_directory="..",
41 |     )
42 |     if result.return_code:
43 |         fail("{}\n{}\nFailed running find xcelium command".format(result.stdout, result.stderr))
44 |     xcelium_home = result.stdout.strip()
45 |     include = "{}/tools.lnx86/include".format(xcelium_home)
46 |     for hdr in XRUN_DPI_HEADERS:
47 |         hdr_path = "{}/{}".format(include, hdr)
48 |         repository_ctx.symlink(hdr_path, hdr)
49 |     repository_ctx.file("BUILD", XRUN_BUILD)
50 |
51 | xcelium_setup = repository_rule(
52 |     implementation = _xcelium_setup_impl,
53 |     local = True,
54 |     environ = VARS,
55 | )
56 |
57 | def _vcs_setup_impl(repository_ctx):
58 |     if repository_ctx.attr.name != "vcs":
59 |         fail("The vcs_setup rule must be named 'vcs'!")
60 |     result = repository_ctx.execute(
61 |         ["runmod", "vcs", "--", "printenv", "VCS_HOME"],
62 |         environment = repository_ctx.os.environ,
63 |         # working_directory="..",
64 |     )
65 |     if result.return_code:
66 |         fail("{}\n{}\nFailed running find vcs command".format(result.stdout, result.stderr))
67 |     vcs_home = result.stdout.strip()
68 |     include = "{}/include".format(vcs_home)
69 |     for hdr in VCS_DPI_HEADERS:
70 |         hdr_path = "{}/{}".format(include, hdr)
71 |         repository_ctx.symlink(hdr_path, hdr)
72 |     repository_ctx.file("BUILD", VCS_BUILD)
73 |
74 | vcs_setup = repository_rule(
75 |     implementation = _vcs_setup_impl,
76 |     local = True,
77 |     environ = VARS,
78 | )
79 | -------------------------------------------------------------------------------- /tests/BUILD: --------------------------------------------------------------------------------
1 | load("@com_github_bazelbuild_buildtools//buildifier:def.bzl", "buildifier")
2 |
3 | buildifier(
4 |     name = "buildifier_format_diff",
5 |     diff_command = "diff",
6 |     mode = "diff",
7 | )
8 |
9 | buildifier(
10 |     name = "buildifier_lint",
11 |     lint_mode = "warn",
12 |     lint_warnings = [
13 |         "-function-docstring-args",
14 |         "-function-docstring",
15 |     ],
16 |     mode = "fix",
17 | )
18 |
19 | buildifier(
20 |     name = "buildifier_fix",
21 |     lint_mode = "fix",
22 |     mode = "fix",
23 | )
24 | -------------------------------------------------------------------------------- /tests/doc_test.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Check that the documentation has been regenerated
3 | # This must be executed from the top of the project
4 |
5 | do_regenerate=0
6 |
7 | while (( $# ))
8 | do
9 |     case $1 in
10 |         "--regenerate" )
11 |             do_regenerate=1
12 |             ;;
13 |         *)
14 |             echo "$1"
15 |             echo "Unexpected Command Line Input - Exiting"
16 |             exit 1
17 |             ;;
18 |     esac
19 |     shift
20 | done
21
| 22 | declare -i result=0 23 | 24 | # Regenerate documentation 25 | bazel build //docs/... 26 | result=$(($? | $result)) 27 | 28 | # Compare each .md file generated in bazel-bin 29 | for bazel_bin_file in bazel-bin/docs/*.md 30 | do 31 | static_file=`echo "$bazel_bin_file" | sed -e "s/^bazel-bin\///"` 32 | if [ $do_regenerate -eq 1 ] 33 | then 34 | cp $bazel_bin_file $static_file 35 | chmod +w $static_file 36 | else 37 | echo "diff $bazel_bin_file $static_file" 38 | diff $bazel_bin_file $static_file 39 | result=$(($? | $result)) 40 | fi 41 | done 42 | 43 | exit $result 44 | -------------------------------------------------------------------------------- /vendors/cadence/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files(glob([ 4 | "*.template", 5 | "*.f", 6 | ])) 7 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_dv_default_sim_opts.f: -------------------------------------------------------------------------------- 1 | -64bit 2 | -licqueue 3 | -uvmhome CDNS-1.2 4 | +UVM_NO_RELNOTES 5 | -nowarn RNDXCELON:DSEM2009:DSEMEL:TSNSPK 6 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_dv_tb_compile_args.f.template: -------------------------------------------------------------------------------- 1 | -sv 2 | -uvm 3 | -64bit 4 | -verbose 5 | -newperf 6 | -licqueue 7 | -plusperf 8 | -elaborate 9 | -access rwc 10 | -define TBV 11 | -nowarn BADPRF 12 | -nowarn CUVIHR 13 | -nowarn DSEMEL 14 | -nowarn DSEM2009 15 | -nowarn LIBNOU 16 | -nowarn RNDXCELON 17 | -nowarn SPDUSD 18 | -nowarn TSNSPK 19 | -define XCELIUM 20 | -uvmhome CDNS-1.2 21 | -fast_recompilation 22 | -enable_single_yvlib 23 | -timescale 100fs/100fs 24 | -vtimescale 100fs/100fs 25 | -define UVM_REGEX_NO_DPI 26 | -define UVM_NO_DEPRECATED 27 | -libext .sv+.svh+.v+.vams 28 | -vlog_ext +.vh+.vm+.vp+.udp 29 | -define TIMESCALE_STEP_FS=100 30 | -define TIMESCALE_PREC_FS=100 31 | -define UVM_OBJECT_MUST_HAVE_CONSTRUCTOR 32 | {COMPILE_ARGS} 33 | {DEFINES} 34 | {FLISTS} 35 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_dv_tb_compile_args_pldm_ice.f.template: -------------------------------------------------------------------------------- 1 | {COMPILE_ARGS} 2 | {DEFINES} 3 | {FLISTS} 4 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_dv_unit_test.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # This is a template for the bazel verilog_dv_unit_test rule 3 | # It is not intended to be run stand-alone 4 | {SIMULATOR_COMMAND} \ 5 | -libext .sv \ 6 | -libext .svh \ 7 | -libext .v \ 8 | -libext .vams \ 9 | -timescale 100fs/100fs \ 10 | -vtimescale 100fs/100fs \ 11 | -define TIMESCALE_STEP_FS=100 \ 12 | -define TIMESCALE_PREC_FS=100 \ 13 | -enable_single_yvlib \ 14 | {DPI_LIBS} \ 15 | {DEFAULT_SIM_OPTS} \ 16 | {FLISTS} \ 17 | {SIM_ARGS} \ 18 | $@ 19 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_cdc_epilogue_cmds.tcl.template: -------------------------------------------------------------------------------- 1 | # This is a template used in the bazel verilog_rtl_cdc_test rule 2 | # These commands perform common error reporting and determine the exit code of the CDC tool 3 | # 
They execute after the command_files have all been executed 4 | check_cdc -init 5 | check_cdc -clock_domain -find 6 | check_cdc -pair -find 7 | check_cdc -scheme -find 8 | check_cdc -group -find 9 | check_cdc -reset -find 10 | set all_violas [check_cdc -list violations] 11 | set num_violas [llength $all_violas] 12 | for {set viola_idx 0} {$viola_idx < $num_violas} {incr viola_idx} { 13 | puts "[lindex $all_violas $viola_idx]" 14 | } 15 | set return_value [expr {$num_violas > 0}] 16 | if {$return_value} { 17 | puts "$num_violas errors" 18 | } 19 | if { $::RULES_VERILOG_GUI == 0 } { 20 | exit $return_value 21 | } 22 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_cdc_preamble_cmds.tcl.template: -------------------------------------------------------------------------------- 1 | # This is a template used in the bazel verilog_rtl_cdc_test rule 2 | # These commands analyze and elaborate the design based on the attributes of the bazel target 3 | # These are the first commands that the CDC tool runs 4 | clear -all 5 | set elaborate_single_run_mode True 6 | analyze -sv09 +libext+.v+.sv {BBOX_MODULES_CMD} {DEFINES} {FLISTS} {TOP_PATH} 7 | elaborate {BBOX_MODULES_CMD} -top {INST_TOP} {BBOX_ARRAY_SIZE_CMD} 8 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_cdc_test.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This is a template for the bazel verilog_rtl_cdc_test rule 3 | # It is not intended to be run stand-alone 4 | set -e 5 | 6 | use_gui=0 7 | remaining_args=() 8 | 9 | while (( $# )) 10 | do 11 | case $1 in 12 | "--gui" ) 13 | use_gui=1 14 | ;; 15 | *) 16 | remaining_args+=($1) 17 | ;; 18 | esac 19 | shift 20 | done 21 | 22 | no_gui_flag="-no_gui" 23 | if [ $use_gui -eq 1 ] 24 | then 25 | no_gui_flag="" 26 | fi 27 | 28 | # The contents run by CDC_COMMAND need to be wrapped in double quotes to account for how JasperGold processes command line arguments 29 | # Without the double quotes, JasperGold doesn't treat all the command line arguments as a single unit. Instead, it drops into a shell 30 | # and doesn't run the specified commands. 31 | {CDC_COMMAND} \ 32 | "-cdc \ 33 | $no_gui_flag \ 34 | -proj cdc_run \ 35 | -define RULES_VERILOG_GUI $use_gui \ 36 | -license_remove_method off 37 | {PREAMBLE_CMDS} \ 38 | {CMD_FILES} \ 39 | {EPILOGUE_CMDS} \ 40 | ${remaining_args[@]}" 41 | ! grep "^\[*ERROR" cdc_run/jg.log 42 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_lint_cmds.tcl.template: -------------------------------------------------------------------------------- 1 | HAL does not use a command file; it only uses a run script. This rendered template should never be called by any run script. 
2 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_lint_test.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # This is a template for the bazel verilog_rtl_lint_test rule 3 | # It is not intended to be run stand-alone 4 | mv xrun.log xrun.log.bak 2> /dev/null 5 | mv xrun.log.xml xrun.log.xml.bak 2> /dev/null 6 | {SIMULATOR_COMMAND} \ 7 | -sv \ 8 | -hal \ 9 | -licqueue \ 10 | -libext .v \ 11 | -libext .sv \ 12 | -enable_single_yvlib \ 13 | -timescale 100fs/100fs \ 14 | {DEFINES} \ 15 | {FLISTS} \ 16 | {TOP_PATH} \ 17 | -halargs '"-RULEFILE {RULEFILE} -inst_top {INST_TOP} -design_info {DESIGN_INFO} -XML xrun.log.xml"' \ 18 | -logfile xrun.log 19 | 20 | export PYTHONPATH="{LINT_PARSER_LIB}:$PYTHONPATH" 21 | ./{LINT_PARSER} $@ --waiver-direct "{WAIVER_DIRECT}" 22 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_unit_test.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # This is a template for the bazel verilog_rtl_unit_test rule 3 | # It is not intended to be run stand-alone 4 | set -e 5 | 6 | generate_waves=0 7 | launch_wave_viewer=0 8 | remaining_args=() 9 | remaining_args_str="" 10 | 11 | while (( $# )) 12 | do 13 | case $1 in 14 | "--waves" ) 15 | generate_waves=1 16 | ;; 17 | "--launch" ) 18 | launch_wave_viewer=1 19 | ;; 20 | *) 21 | remaining_args+=($1) 22 | ;; 23 | esac 24 | shift 25 | done 26 | 27 | waves_render_cmd="" 28 | if [ $generate_waves -eq 1 ] 29 | then 30 | waves_render_cmd="-input {WAVES_RENDER_CMD_PATH} -access r" 31 | fi 32 | 33 | waves_launch_cmd="" 34 | if [ $launch_wave_viewer -eq 1 ] 35 | then 36 | waves_render_cmd="-input {WAVES_RENDER_CMD_PATH} -access r" 37 | waves_launch_cmd="{WAVE_VIEWER_COMMAND} waves.shm" 38 | fi 39 | 40 | {SIMULATOR_COMMAND} \ 41 | -define TBV \ 42 | -libext .sv \ 43 | -libext .svh \ 44 | -libext .v \ 45 | -libext .vams \ 46 | -enable_single_yvlib \ 47 | -licqueue \ 48 | -timescale 100fs/100fs \ 49 | -vtimescale 100fs/100fs \ 50 | -define TIMESCALE_STEP_FS=100 \ 51 | -define TIMESCALE_PREC_FS=100 \ 52 | {PRE_FLIST_ARGS} 53 | {FLISTS} \ 54 | {TOP} \ 55 | $waves_render_cmd \ 56 | {POST_FLIST_ARGS} \ 57 | ${remaining_args[@]}; 58 | $waves_launch_cmd 59 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_unit_test_svunit.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # This is a template for the bazel verilog_rtl_unit_test rule in order to run a SVUnit testbench 3 | # It is not intended to be run stand-alone 4 | # See http://agilesoc.com/open-source-projects/svunit/ 5 | set -e 6 | 7 | generate_waves=0 8 | launch_wave_viewer=0 9 | remaining_args=() 10 | 11 | while (( $# )) 12 | do 13 | case $1 in 14 | "--waves" ) 15 | generate_waves=1 16 | ;; 17 | "--launch" ) 18 | launch_wave_viewer=1 19 | ;; 20 | *) 21 | remaining_args+=($1) 22 | ;; 23 | esac 24 | shift 25 | done 26 | 27 | waves_render_cmd="" 28 | if [ $generate_waves -eq 1 ] 29 | then 30 | waves_render_cmd="-r '-input {WAVES_RENDER_CMD_PATH}' -r '-access r'" 31 | fi 32 | 33 | waves_launch_cmd="" 34 | if [ $launch_wave_viewer -eq 1 ] 35 | then 36 | waves_render_cmd="-r '-input {WAVES_RENDER_CMD_PATH}' -r '-access r'" 37 | waves_launch_cmd="{WAVE_VIEWER_COMMAND} waves.shm" 38 | fi 39 | 40 | 
{SIMULATOR_COMMAND} \ 41 | runSVUnit \ 42 | -s xcelium \ 43 | --no_abs_path_flist \ 44 | --no_feedback \ 45 | {PRE_FLIST_ARGS} \ 46 | -o . \ 47 | -c '-define\ TBV' \ 48 | -c '-libext\ .sv' \ 49 | -c '-libext\ .svh' \ 50 | -c '-libext\ .v' \ 51 | -c '-libext\ .vams' \ 52 | -c '-enable_single_yvlib' \ 53 | -c '-licqueue' \ 54 | -c '-timescale\ 100fs/100fs' \ 55 | -c '-vtimescale\ 100fs/100fs' \ 56 | -c '-define\ TIMESCALE_STEP_FS=100' \ 57 | -c '-define\ TIMESCALE_PREC_FS=100' \ 58 | {FLISTS} \ 59 | $waves_render_cmd \ 60 | {POST_FLIST_ARGS} 61 | ${remaining_args[@]}; 62 | $waves_launch_cmd 63 | 64 | grep -q "\[testrunner\]: PASSED" run.log 65 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_unit_test_svunit_waves.tcl.template: -------------------------------------------------------------------------------- 1 | # This is a template used in the bazel verilog_rtl_unit_test rule 2 | # These commands are intended to be passed to Xcelium to create the waveform database for an svunit test if the appropriate command line flag is set 3 | database -open -shm mydb -into waves.shm -default 4 | probe -database mydb testrunner -all -dynamic -memories -depth all -packed 2048 -unpacked 2048 5 | puts [string map {"\"" ""} [format {"Waves Available: %s/%s"} [pwd] "waves.shm"]] 6 | run 7 | -------------------------------------------------------------------------------- /vendors/cadence/verilog_rtl_unit_test_waves.tcl.template: -------------------------------------------------------------------------------- 1 | # This is a template used in the bazel verilog_rtl_unit_test rule 2 | # These commands are intended to be passed to Xcelium to create the waveform database for an rtl unit test if the appropriate command line flag is set 3 | database -open -shm mydb -into waves.shm -default 4 | probe -database mydb {TOP_BASE_NAME} -all -dynamic -memories -depth all -packed 2048 -unpacked 2048 5 | puts [string map {"\"" ""} [format {"Waves Available: %s/%s"} [pwd] "waves.shm"]] 6 | run 7 | -------------------------------------------------------------------------------- /vendors/common/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files(glob([ 4 | "*.template", 5 | "*.f", 6 | ])) 7 | -------------------------------------------------------------------------------- /vendors/common/verilog_dv_tb_runtime_args.f.template: -------------------------------------------------------------------------------- 1 | {RUNTIME_ARGS} 2 | {DPI_LIBS} 3 | -------------------------------------------------------------------------------- /vendors/real_intent/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files(glob([ 4 | "*.template", 5 | ])) 6 | -------------------------------------------------------------------------------- /vendors/real_intent/verilog_rtl_lint_cmds.tcl.template: -------------------------------------------------------------------------------- 1 | set ri_write_zdb false ;# Turn off the default debug database because it isn't needed for normal batch mode runs 2 | source {RULEFILE} 3 | analyze {DEFINES} {FLISTS} {TOP_PATH} 4 | elaborate {INST_TOP} 5 | report_policy ALL -output lint.rpt -verbose -skip_empty_summary_status 6 | -------------------------------------------------------------------------------- 
/vendors/real_intent/verilog_rtl_lint_test.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | # This is a template for the bazel verilog_rtl_lint_test rule 3 | # It is not intended to be run stand-alone 4 | mv lint.log lint.log.bak 2> /dev/null 5 | {SIMULATOR_COMMAND} \ 6 | -log lint.log -i {COMMAND_SCRIPT} -wait_license 7 | 8 | ./{LINT_PARSER} $@ 9 | -------------------------------------------------------------------------------- /vendors/synopsys/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files(glob([ 4 | "*.template", 5 | "*.f", 6 | ])) 7 | -------------------------------------------------------------------------------- /vendors/synopsys/verilog_dv_default_sim_opts.f: -------------------------------------------------------------------------------- 1 | -full64 2 | -licqueue 3 | +UVM_NO_RELNOTES 4 | -------------------------------------------------------------------------------- /vendors/synopsys/verilog_dv_tb_compile_args.f.template: -------------------------------------------------------------------------------- 1 | -sverilog 2 | {COMPILE_ARGS} 3 | {DEFINES} 4 | {FLISTS} 5 | -------------------------------------------------------------------------------- /verilog/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files([ 4 | "defs.bzl", 5 | ]) 6 | -------------------------------------------------------------------------------- /verilog/defs.bzl: -------------------------------------------------------------------------------- 1 | """Public entry point to all supported Verilog rules and APIs""" 2 | 3 | load( 4 | "//verilog/private:verilog.bzl", 5 | _verilog_test = "verilog_test", 6 | _verilog_tool_encapsulation = "verilog_tool_encapsulation", 7 | ) 8 | load( 9 | "//verilog/private:rtl.bzl", 10 | _verilog_rtl_cdc_test = "verilog_rtl_cdc_test", 11 | _verilog_rtl_library = "verilog_rtl_library", 12 | _verilog_rtl_lint_test = "verilog_rtl_lint_test", 13 | _verilog_rtl_pkg = "verilog_rtl_pkg", 14 | _verilog_rtl_shell = "verilog_rtl_shell", 15 | _verilog_rtl_unit_test = "verilog_rtl_unit_test", 16 | ) 17 | load( 18 | "//verilog/private:dv.bzl", 19 | _verilog_dv_library = "verilog_dv_library", 20 | _verilog_dv_tb = "verilog_dv_tb", 21 | _verilog_dv_test_cfg = "verilog_dv_test_cfg", 22 | _verilog_dv_unit_test = "verilog_dv_unit_test", 23 | ) 24 | 25 | verilog_tool_encapsulation = _verilog_tool_encapsulation 26 | verilog_test = _verilog_test 27 | 28 | verilog_rtl_cdc_test = _verilog_rtl_cdc_test 29 | verilog_rtl_library = _verilog_rtl_library 30 | verilog_rtl_lint_test = _verilog_rtl_lint_test 31 | verilog_rtl_pkg = _verilog_rtl_pkg 32 | verilog_rtl_shell = _verilog_rtl_shell 33 | verilog_rtl_unit_test = _verilog_rtl_unit_test 34 | 35 | verilog_dv_library = _verilog_dv_library 36 | verilog_dv_tb = _verilog_dv_tb 37 | verilog_dv_test_cfg = _verilog_dv_test_cfg 38 | verilog_dv_unit_test = _verilog_dv_unit_test 39 | -------------------------------------------------------------------------------- /verilog/private/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//visibility:public"]) 2 | 3 | exports_files([ 4 | "rtl.bzl", 5 | "dv.bzl", 6 | "verilog.bzl", 7 | ]) 8 | 
-------------------------------------------------------------------------------- /verilog/private/dv.bzl: -------------------------------------------------------------------------------- 1 | """Rules for building DV infrastructure.""" 2 | 3 | load(":verilog.bzl", "ToolEncapsulationInfo", "VerilogInfo", "flists_to_arguments", "gather_shell_defines", "get_transitive_srcs") 4 | 5 | DVTestInfo = provider(fields = { 6 | "sim_opts": "Simulation options to carry forward.", 7 | "uvm_testname": "UVM Test Name; passed to simulator via plusarg +UVM_TESTNAME.", 8 | "tb": "The verilog_dv_tb (verilog compile) associated with this test. Must be a Label of type verilog_dv_tb.", 9 | "tags": "Additional tags to be able to filter in simmer.", 10 | "timeout": "Duration in minutes before the test will be killed due to timeout.", 11 | "pre_run": "Bazel run command that can be executed immediately before dv_tb simulation.", 12 | }) 13 | 14 | DVTBInfo = provider(fields = { 15 | "ccf": "Coverage config file.", 16 | }) 17 | 18 | def _verilog_dv_test_cfg_impl(ctx): 19 | parent_uvm_testnames = [dep[DVTestInfo].uvm_testname for dep in reversed(ctx.attr.inherits) if hasattr(dep[DVTestInfo], "uvm_testname")] 20 | parent_tbs = [dep[DVTestInfo].tb for dep in reversed(ctx.attr.inherits) if hasattr(dep[DVTestInfo], "tb")] 21 | parent_timeouts = [dep[DVTestInfo].timeout for dep in reversed(ctx.attr.inherits) if hasattr(dep[DVTestInfo], "timeout")] 22 | parent_pre_run = [dep[DVTestInfo].pre_run for dep in reversed(ctx.attr.inherits) if hasattr(dep[DVTestInfo], "pre_run")] 23 | 24 | sim_opts = {} 25 | 26 | # Each successive dependency may override previous deps 27 | for dep in ctx.attr.inherits: 28 | sim_opts.update(dep[DVTestInfo].sim_opts) 29 | 30 | # This rule instance may override previous sim_opts 31 | sim_opts.update(ctx.attr.sim_opts) 32 | 33 | provider_args = {} 34 | 35 | uvm_testname = None 36 | if ctx.attr.uvm_testname: 37 | uvm_testname = ctx.attr.uvm_testname 38 | elif len(parent_uvm_testnames): 39 | uvm_testname = parent_uvm_testnames[0] 40 | else: 41 | uvm_testname = ctx.attr.name 42 | 43 | timeout = None 44 | if ctx.attr.timeout: 45 | timeout = ctx.attr.timeout 46 | elif len(parent_timeouts): 47 | timeout = parent_timeouts[0] 48 | 49 | tb = None 50 | if ctx.attr.tb: 51 | tb = ctx.attr.tb 52 | else: 53 | tb = parent_tbs[0] 54 | 55 | pre_run = None 56 | if ctx.attr.pre_run: 57 | pre_run = ctx.attr.pre_run 58 | elif len(parent_pre_run): 59 | pre_run = parent_pre_run[0] 60 | 61 | provider_args["uvm_testname"] = uvm_testname 62 | provider_args["tb"] = tb 63 | provider_args["timeout"] = timeout 64 | provider_args["sim_opts"] = sim_opts 65 | provider_args["tags"] = ctx.attr.tags 66 | provider_args["pre_run"] = pre_run 67 | 68 | for socket_name, socket_command in ctx.attr.sockets.items(): 69 | if "{socket_file}" not in socket_command: 70 | fail("socket {} did not have {{socket_file}} in socket_command".format(socket_name)) 71 | 72 | dynamic_args = { 73 | "sockets": ctx.attr.sockets, 74 | "timeout": timeout, 75 | "sim_opts": sim_opts, 76 | "uvm_testname": uvm_testname, 77 | "tags": ctx.attr.tags, 78 | "pre_run": pre_run, 79 | } 80 | out = ctx.outputs.dynamic_args 81 | ctx.actions.write( 82 | output = out, 83 | content = str(dynamic_args), 84 | ) 85 | return [DVTestInfo(**provider_args)] 86 | 87 | verilog_dv_test_cfg = rule( 88 | doc = """A DV test configuration. 89 | 90 | This is not an executable target. It generates multiple files which may then 91 | be used by simmer (the wrapping tool to invoke the simulator). 
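    A minimal sketch of a concrete test configuration (the target and test
    names below are hypothetical):

        verilog_dv_test_cfg(
            name = "smoke_test",
            tb = "//dv/tb:mytb",
            uvm_testname = "smoke_test",
            sim_opts = {
                "+UVM_VERBOSITY=": "UVM_MEDIUM",
            },
        )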
92 | """, 93 | implementation = _verilog_dv_test_cfg_impl, 94 | attrs = { 95 | "abstract": attr.bool( 96 | default = False, 97 | doc = "When True, this configuration is abstract and does not represent a complete configuration.\n" + 98 | "It is not intended to be executed. It is only intended to be used as a base for other test configurations to inherit from.\n" + 99 | "See 'inherits' attribute.\n", 100 | ), 101 | "inherits": attr.label_list( 102 | doc = "Inherit configurations from other verilog_dv_test_cfg targets.\n" + 103 | "Entries later in the list will override arguments set by previous inherits entries.\n" + 104 | "Only attributes noted as inheritable in documentation may be inherited.\n" + 105 | "Any field explicitly set in this rule will override values set via inheritance.", 106 | ), 107 | "uvm_testname": attr.string( 108 | doc = "UVM testname eventually passed to simulator via plusarg +UVM_TESTNAME.\n" + 109 | "This attribute is inheritable. See 'inherits' attribute.\n", 110 | ), 111 | "tb": attr.label( 112 | doc = "The testbench to run this test on. This label must be a 'verilog_dv_tb' target." + 113 | "This attribute is inheritable. See 'inherits' attribute.\n" + 114 | "Future: Allow tb to be a list of labels to allow a test to run on multiple verilog_dv_tb", 115 | ), 116 | "sim_opts": attr.string_dict( 117 | doc = "Additional simulation options. These are 'runtime' arguments. Preprocessor or compiler directives will not take effect.\n" + 118 | "The (key, value) pairs are joined without additional characters." + 119 | "For unary arguments (e.g. +DISABLE_SCOREBOARD), set the value to be the empty string.\n" + 120 | "For arguments with a value (e.g. +UVM_VERBOSITY=UVM_MEDIUM), add an '=' as a suffix to the key.\n" + 121 | "This attribute is inheritable. See 'inherits' attribute.\n" + 122 | "Unlike other inheritable attributes, values in sim_opts are not entirely overridden. Instead, the dictionary is 'updated' with new values at each successive level.\n" + 123 | "This allows for the override of individual simopts for finer-grained control.", 124 | ), 125 | "no_run": attr.bool( 126 | default = False, 127 | doc = "Set to True to skip running this test.\n" + 128 | "This flag is not used by bazel but is used as a query filter by simmer." 
+ 129 | "TODO: Deprecate this flag in favor of using built-in tags.", 130 | ), 131 | "sockets": attr.string_dict( 132 | doc = "Dictionary mapping of socket_name to socket_command.\n" + 133 | "Simmer has the ability to spawn parallel processes to the primary simulation that are connected via sockets.\n" + 134 | "For each entry in the dictionary, simmer will create a separate process and pass a unique temporary file path to both the simulator and the socket_command.\n" + 135 | "The socket name is a short identifier that will be passed as \"+SOCKET__=\" to the simulator.\n" + 136 | "The socket_file is a path to a unique temporary file in the simulation results directory created by simmer.\n" + 137 | "The socket_command is a bash command that must contain a python string formatter of \"{socket_file}\".\n" + 138 | "The socket_command will be run from the root of the project tree.", 139 | ), 140 | "pre_run": attr.string( 141 | doc = "Simmer has the ability to execute a user-specified bazel run command before starting the RTL simulation process.\n" + 142 | "This attribute is where the user can define that bazel run command, on a per-test basis.\n" + 143 | "For example, if the use wants to run 'bazel run //foo:bar' before their simulation, set this attribute to '//foo:bar'.", 144 | ), 145 | "timeout": attr.int( 146 | default = -1, 147 | doc = "Duration in minutes before the test will be killed due to timeout.\n" + 148 | "This option is inheritable.", 149 | ), 150 | }, 151 | outputs = { 152 | "dynamic_args": "%{name}_dynamic_args.py", 153 | }, 154 | ) 155 | 156 | def _verilog_dv_library_impl(ctx): 157 | if ctx.attr.incdir: 158 | # Using dirname may result in bazel-out included in path 159 | directories = depset([f.short_path[:-len(f.basename) - 1] for f in ctx.files.srcs]).to_list() 160 | else: 161 | directories = [] 162 | 163 | # # Add output files from direct dependencies (from genrules) 164 | srcs = depset(ctx.files.srcs, transitive = [dep[DefaultInfo].files for dep in ctx.attr.deps if VerilogInfo not in dep]) 165 | 166 | if len(ctx.files.in_flist): 167 | in_flist = ctx.files.in_flist 168 | else: 169 | in_flist = ctx.files.srcs 170 | 171 | content = [] 172 | for d in directories: 173 | if d == "": 174 | d = "." 175 | content.append("+incdir+{}".format(d)) 176 | for f in in_flist: 177 | content.append(f.short_path) 178 | 179 | all_sos = [] 180 | for dpi in ctx.attr.dpi: 181 | sos = [] 182 | for gfile in dpi[DefaultInfo].files.to_list(): 183 | if gfile.path.endswith(".so"): 184 | sos.append(gfile) 185 | if len(sos) != 1: 186 | fail("Expected to find exactly one .so for verilog_dv_library dpi argument '", dpi, "'. 
Found .so: ", sos) 187 | all_sos.extend(sos) 188 | 189 | out = ctx.outputs.out 190 | ctx.actions.write( 191 | output = out, 192 | content = "\n".join(content), 193 | ) 194 | 195 | trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps + ctx.attr.dpi, VerilogInfo, "transitive_sources", allow_other_outputs = True) 196 | trans_flists = get_transitive_srcs([out], ctx.attr.deps, VerilogInfo, "transitive_flists", allow_other_outputs = False) 197 | trans_dpi = get_transitive_srcs(all_sos, ctx.attr.deps, VerilogInfo, "transitive_dpi", allow_other_outputs = False) 198 | 199 | all_files = depset(trans_srcs.to_list() + trans_flists.to_list()) 200 | 201 | return [ 202 | VerilogInfo(transitive_sources = trans_srcs, transitive_flists = trans_flists, transitive_dpi = trans_dpi), 203 | DefaultInfo( 204 | files = all_files, 205 | runfiles = ctx.runfiles(files = trans_srcs.to_list() + trans_flists.to_list()), 206 | ), 207 | ] 208 | 209 | verilog_dv_library = rule( 210 | doc = """A DV Library. 211 | 212 | Creates a generated flist file from a list of source files. 213 | """, 214 | implementation = _verilog_dv_library_impl, 215 | attrs = { 216 | "srcs": attr.label_list( 217 | allow_files = True, 218 | mandatory = True, 219 | doc = "Systemverilog source files.\n" + 220 | "Files are assumed to be \\`included inside another file (e.g. the package file) and will not be placed on directly in the flist unless declared in the 'in_flist' attribute.", 221 | ), 222 | "deps": attr.label_list( 223 | doc = "verilog_dv_library targets that this target is dependent on.", 224 | ), 225 | "in_flist": attr.label_list( 226 | allow_files = True, 227 | doc = "Files to be placed directly in the generated flist.\n" + 228 | "Best practice recommends 'pkg' and 'interface' files be declared here.\n" + 229 | "If this attribute is empty (default), all srcs will put into the flist instead.", 230 | ), 231 | "dpi": attr.label_list( 232 | doc = "cc_libraries to link in through the DPI. 
Currently, cc_import is not supported for precompiled shared libraries.", 233 | ), 234 | "incdir": attr.bool( 235 | default = True, 236 | doc = "Generate a +incdir in generated flist for every file's directory declared in 'srcs' attribute.", 237 | ), 238 | }, 239 | outputs = {"out": "%{name}.f"}, 240 | ) 241 | 242 | def _verilog_dv_tb_impl(ctx): 243 | defines = {} 244 | defines.update(ctx.attr.defines) 245 | defines.update(gather_shell_defines(ctx.attr.shells)) 246 | 247 | top = "tb_top" 248 | vcs_extra_compile_args = [] 249 | xrun_extra_compile_args = [] 250 | pldm_ice_extra_compile_args = [] 251 | if len(ctx.attr.verilog_config): 252 | top = ctx.attr.verilog_config.keys()[0] 253 | cfg = ctx.attr.verilog_config[top] 254 | vcs_extra_compile_args.append(cfg) 255 | xrun_extra_compile_args.append("-compcnfg {}".format(cfg)) 256 | vcs_extra_compile_args.append("-top {}".format(top)) 257 | xrun_extra_compile_args.append("-top {}".format(top)) 258 | vcs_extra_compile_args.extend(ctx.attr.extra_compile_args) 259 | xrun_extra_compile_args.extend(ctx.attr.extra_compile_args) 260 | pldm_ice_extra_compile_args.extend(ctx.attr.extra_compile_args) 261 | 262 | ctx.actions.expand_template( 263 | template = ctx.file._compile_args_template_vcs, 264 | output = ctx.outputs.compile_args_vcs, 265 | substitutions = { 266 | "{COMPILE_ARGS}": ctx.expand_location("\n".join(vcs_extra_compile_args), targets = ctx.attr.extra_runfiles), 267 | "{DEFINES}": "\n".join(["+define+{}{}".format(key, value) for key, value in defines.items()]), 268 | "{FLISTS}": flists_to_arguments(ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists", "\n-f"), 269 | }, 270 | ) 271 | ctx.actions.expand_template( 272 | template = ctx.file._compile_args_template_xrun, 273 | output = ctx.outputs.compile_args_xrun, 274 | substitutions = { 275 | "{COMPILE_ARGS}": ctx.expand_location("\n".join(xrun_extra_compile_args), targets = ctx.attr.extra_runfiles), 276 | "{DEFINES}": "\n".join(["-define {}{}".format(key, value) for key, value in defines.items()]), 277 | "{FLISTS}": flists_to_arguments(ctx.attr.deps + ctx.attr.shells, VerilogInfo, "transitive_flists", "\n-f"), 278 | }, 279 | ) 280 | ctx.actions.expand_template( 281 | template = ctx.file._compile_args_template_pldm_ice, 282 | output = ctx.outputs.compile_args_pldm_ice, 283 | substitutions = { 284 | "{COMPILE_ARGS}": ctx.expand_location("\n".join(pldm_ice_extra_compile_args), targets = ctx.attr.extra_runfiles), 285 | "{DEFINES}": "\n".join(["+define+{}{}".format(key, value) for key, value in defines.items()]), 286 | "{FLISTS}": flists_to_arguments(ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists", "\n-f"), 287 | }, 288 | ) 289 | ctx.actions.expand_template( 290 | template = ctx.file._runtime_args_template, 291 | output = ctx.outputs.runtime_args, 292 | substitutions = { 293 | "{RUNTIME_ARGS}": ctx.expand_location("\n".join(ctx.attr.extra_runtime_args), targets = ctx.attr.extra_runfiles), 294 | "{DPI_LIBS}": flists_to_arguments(ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_dpi", "-sv_lib"), 295 | }, 296 | ) 297 | ctx.actions.write( 298 | output = ctx.outputs.compile_warning_waivers, 299 | content = "[\n" + "\n".join(["re.compile('{}'),".format(ww) for ww in ctx.attr.warning_waivers]) + "\n]\n", 300 | ) 301 | 302 | # Null action to trigger run? 
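    # (The run_shell below is effectively a no-op: it echoes the location of
    # the staged compile tree and touches the executable output so the target
    # is runnable; the real compile is launched by simmer using the generated
    # args files above.)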
303 | ctx.actions.run_shell( 304 | command = "echo \"Build compile tree directory in \"`pwd`; touch {}".format(ctx.outputs.executable.path), 305 | outputs = [ctx.outputs.executable], 306 | ) 307 | 308 | trans_srcs = get_transitive_srcs([], ctx.attr.deps + ctx.attr.shells, VerilogInfo, "transitive_sources", allow_other_outputs = True) 309 | trans_flists = get_transitive_srcs([], ctx.attr.deps + ctx.attr.shells, VerilogInfo, "transitive_flists", allow_other_outputs = False) 310 | out_deps = depset([ctx.outputs.compile_args_vcs, ctx.outputs.compile_args_xrun, ctx.outputs.compile_args_pldm_ice, ctx.outputs.runtime_args, ctx.outputs.compile_warning_waivers, ctx.outputs.executable]) 311 | all_files = depset([], transitive = [trans_srcs, trans_flists, out_deps]) 312 | 313 | return [ 314 | DefaultInfo( 315 | files = all_files, 316 | runfiles = ctx.runfiles(files = trans_srcs.to_list() + trans_flists.to_list() + out_deps.to_list() + ctx.files.ccf + ctx.files.extra_runfiles + [ctx.file._default_sim_opts_xrun] + [ctx.file._default_sim_opts_vcs]), 317 | ), 318 | DVTBInfo( 319 | ccf = ctx.files.ccf, 320 | ), 321 | ] 322 | 323 | verilog_dv_tb = rule( 324 | doc = """A DV Testbench. 325 | 326 | rules_verilog uses two separate rules to strongly differentiate between 327 | compilation and simulation. verilog_dv_tb is used for compilation and 328 | verilog_dv_test_cfg is used for simulation. 329 | 330 | A verilog_dv_tb describes how to compile a testbench. It is not a 331 | standalone executable bazel rule. It is intended to provide simmer (a 332 | higher level simulation spawning tool) hooks to execute the compile and 333 | subsequent simulations. 334 | """, 335 | implementation = _verilog_dv_tb_impl, 336 | attrs = { 337 | "deps": attr.label_list( 338 | mandatory = True, 339 | doc = "A list of verilog_dv_library or verilog_rtl_library labels that the testbench is dependent on.\n" + 340 | "Dependency ordering within this label list is not necessary if dependencies are consistently declared in all other rules.", 341 | ), 342 | "defines": attr.string_dict( 343 | doc = "Additional preprocessor defines to throw for this testbench compile.\n" + 344 | "Key, value pairs are joined without additional characters. If it is a unary flag, set the value portion to be the empty string.\n" + 345 | "For binary flags, add an '=' as a suffix to the key.", 346 | ), 347 | "warning_waivers": attr.string_list( 348 | doc = "Waive warnings in the compile. By default, simmer promotes all compile warnings to errors.\n" + 349 | "This list is converted to python regular expressions which are imported by simmer to waive warnings.\n" + 350 | "All warnings may be waived by using '\\*W'\n", 351 | ), 352 | "shells": attr.label_list( 353 | doc = "List of shells to use. 
Each label must be a verilog_rtl_shell instance.\n" + 354 | "Each shell thrown will create two defines:\n" + 355 | " \\`define gumi_{module} {module}_shell\n" + 356 | " \\`define gumi_use_{module}_shell\n" + 357 | "The shell module declaration must be guarded by the gumi_use_{module}_shell define:\n" + 358 | " \\`ifdef gumi_use_{module}_shell\n" + 359 | " module {module}_shell(/*AUTOARGS*/);\n" + 360 | " ...\n" + 361 | " endmodule\n" + 362 | " \\`endif\n", 363 | ), 364 | "ccf": attr.label_list( 365 | allow_files = True, 366 | doc = "Coverage configuration file to provide to simmer.", 367 | ), 368 | "extra_compile_args": attr.string_list( 369 | doc = "Additional flags to pass to the compiler.", 370 | ), 371 | "extra_runtime_args": attr.string_list( 372 | doc = "Additional flags to throw to the simulation run. These flags will not be provided to the compilation, but will be passed to subsequent simulation invocations.", 373 | ), 374 | "extra_runfiles": attr.label_list( 375 | allow_files = True, 376 | doc = "Additional files that need to be passed as runfiles to bazel. Most commonly used for files referred to by extra_compile_args or extra_runtime_args.", 377 | ), 378 | "verilog_config": attr.string_dict( 379 | doc = "Key/value pair where the key represents the name of the config object,\n" + 380 | "and the value represents a relative pointer to the config .v file.", 381 | ), 382 | "_default_sim_opts_xrun": attr.label( 383 | allow_single_file = True, 384 | default = "@rules_verilog//vendors/cadence:verilog_dv_default_sim_opts.f", 385 | doc = "Default XRUN simulation options.", 386 | ), 387 | "_default_sim_opts_vcs": attr.label( 388 | allow_single_file = True, 389 | default = "@rules_verilog//vendors/synopsys:verilog_dv_default_sim_opts.f", 390 | doc = "Default VCS simulation options.", 391 | ), 392 | "_compile_args_template_xrun": attr.label( 393 | default = Label("@rules_verilog//vendors/cadence:verilog_dv_tb_compile_args.f.template"), 394 | allow_single_file = True, 395 | doc = "Template to generate compilation arguments flist.", 396 | ), 397 | "_compile_args_template_vcs": attr.label( 398 | default = Label("@rules_verilog//vendors/synopsys:verilog_dv_tb_compile_args.f.template"), 399 | allow_single_file = True, 400 | doc = "Template to generate compilation arguments flist.", 401 | ), 402 | "_compile_args_template_pldm_ice": attr.label( 403 | default = Label("@rules_verilog//vendors/cadence:verilog_dv_tb_compile_args_pldm_ice.f.template"), 404 | allow_single_file = True, 405 | doc = "Template to generate compilation arguments flist.", 406 | ), 407 | "_runtime_args_template": attr.label( 408 | default = Label("@rules_verilog//vendors/common:verilog_dv_tb_runtime_args.f.template"), 409 | allow_single_file = True, 410 | doc = "Template to generate runtime args from the 'extra_runtime_args' attribute.", 411 | ), 412 | }, 413 | outputs = { 414 | "runtime_args": "%{name}_runtime_args.f", 415 | "compile_args_vcs": "%{name}_compile_args_vcs.f", 416 | "compile_args_xrun": "%{name}_compile_args_xrun.f", 417 | "compile_args_pldm_ice": "%{name}_compile_args_pldm_ice.f", 418 | "compile_warning_waivers": "%{name}_compile_warning_waivers", 419 | }, 420 | # TODO does this still need to be executable with an empty command? 
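    # Presumably kept executable so that `bazel run` stages the complete
    # runfiles tree (sources, flists, generated args files) for simmer's
    # compile step to consume.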
421 | executable = True, 422 | ) 423 | 424 | def _verilog_dv_unit_test_impl(ctx): 425 | trans_srcs = get_transitive_srcs([], ctx.attr.deps, VerilogInfo, "transitive_sources") 426 | srcs_list = trans_srcs.to_list() 427 | flists = get_transitive_srcs([], ctx.attr.deps, VerilogInfo, "transitive_flists") 428 | flists_list = flists.to_list() 429 | 430 | ctx.actions.expand_template( 431 | template = ctx.file.ut_sim_template, 432 | output = ctx.outputs.out, 433 | substitutions = { 434 | "{SIMULATOR_COMMAND}": ctx.attr._command_override[ToolEncapsulationInfo].command, 435 | "{DEFAULT_SIM_OPTS}": "-f {}".format(ctx.file.default_sim_opts.short_path), 436 | "{DPI_LIBS}": flists_to_arguments(ctx.attr.deps, VerilogInfo, "transitive_dpi", "-sv_lib"), 437 | "{FLISTS}": " ".join(["-f {}".format(f.short_path) for f in flists_list]), 438 | "{SIM_ARGS}": " ".join(ctx.attr.sim_args), 439 | }, 440 | is_executable = True, 441 | ) 442 | 443 | runfiles = ctx.runfiles(files = flists_list + srcs_list + [ctx.file.default_sim_opts]) 444 | return [DefaultInfo( 445 | runfiles = runfiles, 446 | executable = ctx.outputs.out, 447 | )] 448 | 449 | verilog_dv_unit_test = rule( 450 | # TODO this could just be a specific use case of verilog_test 451 | doc = """Compiles and runs a small unit test for DV. 452 | 453 | This is typically a unit test for a single verilog_dv_library and its dependencies. 454 | Additional sim options may be passed after '--' in the bazel command. 455 | Interactive example: 456 | bazel run //digital/dv/interfaces/apb_pkg:test -- -gui 457 | For ci testing purposes: 458 | bazel test //digital/dv/interfaces/apb_pkg:test 459 | """, 460 | implementation = _verilog_dv_unit_test_impl, 461 | attrs = { 462 | "deps": attr.label_list( 463 | mandatory = True, 464 | doc = "verilog_dv_library or verilog_rtl_library labels that the testbench is dependent on.\n" + 465 | "Dependency ordering within this label list is not necessary if dependencies are consistently declared in all other rules.", 466 | ), 467 | "ut_sim_template": attr.label( 468 | allow_single_file = True, 469 | default = Label("@rules_verilog//vendors/cadence:verilog_dv_unit_test.sh.template"), 470 | doc = "The template to generate the bash script to run the simulation.", 471 | ), 472 | "default_sim_opts": attr.label( 473 | allow_single_file = True, 474 | default = "@rules_verilog//vendors/cadence:verilog_dv_default_sim_opts.f", 475 | doc = "Default simulator options to pass to the simulator.", 476 | # TODO remove this and just make it part of the template? 
477 | ), 478 | "sim_args": attr.string_list( 479 | doc = "Additional arguments to pass on the command line to the simulator.\n" + 480 | "Both compile and runtime arguments are allowed because dv_unit_test runs as a single step flow.", 481 | ), 482 | "_command_override": attr.label( 483 | default = Label("@rules_verilog//:verilog_dv_unit_test_command"), 484 | doc = "Allows custom override of simulator command in the event of wrapping via modulefiles.\n" + 485 | "Example override in project's .bazelrc:\n" + 486 | ' build --@rules_verilog//:verilog_dv_unit_test_command="runmod -t xrun --"', 487 | ), 488 | }, 489 | outputs = {"out": "%{name}_run.sh"}, 490 | test = True, 491 | ) 492 | 493 | def _verilog_dv_test_cfg_info_aspect_impl(target, ctx): 494 | # buildifier: disable=print 495 | print("verilog_dv_test_cfg_info({}, {}, {})".format(target.label, target[DVTestInfo].tb.label, target[DVTestInfo].tags)) 496 | 497 | # buildifier: enable=print 498 | return [] 499 | 500 | verilog_dv_test_cfg_info_aspect = aspect( 501 | doc = """Gather information about the tb and tags related to a verilog_dv_test_cfg for use in simmer.""", 502 | implementation = _verilog_dv_test_cfg_info_aspect_impl, 503 | attr_aspects = ["deps", "tags"], 504 | ) 505 | 506 | def _verilog_dv_tb_ccf_aspect_impl(target, ctx): 507 | # buildifier: disable=print 508 | print("verilog_dv_tb_ccf({})".format([f.path for f in target[DVTBInfo].ccf])) 509 | 510 | # buildifier: enable=print 511 | return [] 512 | 513 | verilog_dv_tb_ccf_aspect = aspect( 514 | doc = """Find test-to-ccf file mappings for simmer.""", 515 | implementation = _verilog_dv_tb_ccf_aspect_impl, 516 | attr_aspects = ["ccf"], 517 | ) 518 | -------------------------------------------------------------------------------- /verilog/private/rtl.bzl: -------------------------------------------------------------------------------- 1 | """Rules to gather and compile RTL.""" 2 | 3 | load(":verilog.bzl", "CUSTOM_SHELL", "ShellInfo", "ToolEncapsulationInfo", "VerilogInfo", "gather_shell_defines", "get_transitive_srcs") 4 | 5 | _SHELLS_DOC = """List of verilog_rtl_shell Labels. 6 | For each Label, a gumi define will be placed on the command line to use this shell instead of the original module. 7 | This requires that the original module was instantiated using \\`gumi_{module} instead of just {module}.""" 8 | 9 | def create_flist_content(ctx, gumi_path, allow_library_discovery, no_synth = False, makelib = ""): 10 | """Create the content of a '.f' file. 11 | 12 | Args: 13 | gumi_path: The path to the dynamically created gumi file to include. 14 | 15 | The gumi file is put directly on the command line to ensure that the 16 | defines are always used. 17 | allow_library_discovery: When false, modules are placed directly on the command line. 18 | 19 | Preference is to use the -y (modules in this directory can be found by 20 | searching for a file with the same name) and -v (file is a library file 21 | containing multiple modules) flags. Some tools, e.g. Genus, do not 22 | handle -y correctly when invoked many times. As a workaround for these 23 | tools, setting allow_library_discovery to false will put all module 24 | files and library files directly onto the command line. 25 | no_synth: When true, filter any target that sets no_synth=True 26 | 27 | This is an extra precaution to ensure that nonsynthesizable libraries 28 | are not passed to the synthesis tool. 29 | 30 | Returns: 31 | List of strings representing flist content. 
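        For example, with allow_library_discovery = True, a library with one
        header directory, one module directory, one library file, and one
        direct file might yield (all paths hypothetical):

            +incdir+rtl/foo
            rtl/foo/gumi_foo.vh
            -y rtl/foo
            -v rtl/lib/cells.v
            rtl/foo/foo_csr.sv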
32 | """ 33 | flist_content = [] 34 | 35 | # Using dirname may result in bazel-out included in path 36 | incdir = depset([f.short_path[:-len(f.basename) - 1] for f in ctx.files.headers]).to_list() 37 | for d in incdir: 38 | flist_content.append("+incdir+{}".format(d)) 39 | 40 | # Using dirname may result in bazel-out included in path 41 | libdir = depset([f.short_path[:-len(f.basename) - 1] for f in ctx.files.modules]).to_list() 42 | 43 | flist_content.append(gumi_path) 44 | 45 | # if using makelib, start here 46 | if len(makelib): 47 | flist_content.append("-makelib") 48 | flist_content.append(makelib) 49 | 50 | if not no_synth: 51 | if allow_library_discovery: 52 | for d in libdir: 53 | if d == "": 54 | d = "." 55 | flist_content.append("-y {}".format(d)) 56 | else: 57 | flist_content += [f.short_path for f in ctx.files.modules] 58 | 59 | for f in ctx.files.lib_files: 60 | if allow_library_discovery: 61 | flist_content.append("-v {}".format(f.short_path)) 62 | else: 63 | flist_content.append(f.short_path) 64 | 65 | for f in ctx.files.direct: 66 | flist_content.append(f.short_path) 67 | 68 | # if using makelib, terminate here 69 | if len(makelib): 70 | flist_content.append("-endlib") 71 | 72 | flist_content.append("") 73 | return flist_content 74 | 75 | def _verilog_rtl_library_impl(ctx): 76 | srcs = ctx.files.headers + ctx.files.modules + ctx.files.lib_files + ctx.files.direct 77 | 78 | if ctx.attr.is_pkg: 79 | # FIXME opu_tx_rx is failing this check 80 | # for dep in ctx.attr.deps: 81 | # if ShellInfo in dep and not dep[ShellInfo].is_pkg: 82 | # fail("verilog_rtl_pkg may only depend on other verilog_rtl_pkg instances") 83 | pass 84 | else: 85 | for src in srcs: 86 | if "_pkg" in src.basename: 87 | fail("Package files should not declared in a verilog_rtl_library. Use a verilog_rtl_pkg instead. {} is declared in {}".format(src, ctx.label)) 88 | 89 | if ctx.attr.is_shell_of: 90 | if len(ctx.attr.modules) != 1 and not ctx.attr.is_shell_of == CUSTOM_SHELL: 91 | fail("Shells must specify exactly one module") 92 | 93 | # if len(ctx.attr.deps) != 0: 94 | # fail("Shells may not specify deps") 95 | 96 | else: 97 | for dep in ctx.attr.deps: 98 | if ShellInfo in dep and dep[ShellInfo].is_shell_of and not dep[ShellInfo].is_shell_of == CUSTOM_SHELL: 99 | fail("verilog_rtl_library may not depend on shells. Shells should only be included at top-level builds") 100 | for src in srcs: 101 | if "_shell" in src.basename: 102 | fail("Shell files should not be declared in an verilog_rtl_library. Use a verilog_rtl_shell instead. 
{} is declared in {}".format(src, ctx.label)) 103 | 104 | gumi_path = "" 105 | if ctx.attr.enable_gumi: 106 | gumi = ctx.actions.declare_file("gumi_{name}.vh".format(name = ctx.attr.name)) 107 | gumi_content = [] 108 | 109 | # Making this more unique than just gumi.basename.upper() 110 | # To avoid case where multiple directories define the same name for a verilog_rtl_library 111 | gumi_guard_value = gumi.short_path.replace("/", "_").replace(".", "_") 112 | gumi_guard = "__{}__".format(gumi_guard_value.upper()) 113 | gumi_content.append("`ifndef {}".format(gumi_guard)) 114 | gumi_content.append(" `define {}".format(gumi_guard)) 115 | gumi_content.append("") 116 | gumi_content.append("") 117 | if ctx.attr.gumi_override: 118 | gumi_modules = ctx.attr.gumi_override 119 | else: 120 | gumi_modules = [module.basename[:-len(module.extension) - 1] for module in ctx.files.modules] 121 | for module_name in gumi_modules: 122 | gumi_name = "gumi_{}".format(module_name) 123 | gumi_content.append(" `ifndef {}".format(gumi_name)) 124 | gumi_content.append(" `define {} {}".format(gumi_name, module_name)) 125 | gumi_content.append(" `endif") 126 | gumi_content.append("") 127 | gumi_content.append("") 128 | gumi_content.append("") 129 | gumi_content.append("`endif // guard") 130 | 131 | ctx.actions.write( 132 | output = gumi, 133 | content = "\n".join(gumi_content), 134 | ) 135 | 136 | srcs = [gumi] + srcs 137 | gumi_path = gumi.short_path 138 | elif not (ctx.attr.gumi_file_override == None): 139 | gumi_path = ctx.file.gumi_file_override.short_path 140 | 141 | flist_content = create_flist_content(ctx, gumi_path = gumi_path, allow_library_discovery = False, makelib = ctx.attr.makelib) 142 | 143 | last_module = None 144 | for m in ctx.files.modules: 145 | last_module = m 146 | for m in ctx.files.lib_files: 147 | last_module = m 148 | for m in ctx.files.direct: 149 | last_module = m 150 | 151 | ctx.actions.write( 152 | output = ctx.outputs.flist, 153 | content = "\n".join(flist_content), 154 | ) 155 | 156 | trans_srcs = get_transitive_srcs(srcs, ctx.attr.deps, VerilogInfo, "transitive_sources", allow_other_outputs = True) 157 | trans_flists = get_transitive_srcs([ctx.outputs.flist], ctx.attr.deps, VerilogInfo, "transitive_flists", allow_other_outputs = False) 158 | trans_dpi = get_transitive_srcs([], ctx.attr.deps, VerilogInfo, "transitive_dpi", allow_other_outputs = False) 159 | 160 | runfiles_list = trans_srcs.to_list() + trans_flists.to_list() + trans_dpi.to_list() 161 | runfiles = ctx.runfiles(files = runfiles_list) 162 | 163 | all_files = depset(trans_srcs.to_list() + trans_flists.to_list()) 164 | 165 | return [ 166 | ShellInfo( 167 | is_pkg = ctx.attr.is_pkg, 168 | is_shell_of = ctx.attr.is_shell_of, 169 | gumi_path = gumi_path, 170 | ), 171 | VerilogInfo( 172 | transitive_sources = trans_srcs, 173 | transitive_flists = trans_flists, 174 | transitive_dpi = trans_dpi, 175 | last_module = last_module, 176 | ), 177 | DefaultInfo( 178 | files = all_files, 179 | runfiles = runfiles, 180 | ), 181 | ] 182 | 183 | verilog_rtl_library = rule( 184 | doc = "A collection of RTL design files. 
Creates a generated flist file to be included later in a compile.", 185 | implementation = _verilog_rtl_library_impl, 186 | attrs = { 187 | "headers": attr.label_list( 188 | allow_files = True, 189 | doc = "Files that will be included into other files.\n" + 190 | "A '+incdir' flag will be added for each source file's directory.", 191 | ), 192 | "modules": attr.label_list( 193 | allow_files = True, 194 | doc = "Verilog files containing a single module where the module name matches the file name.\n" + 195 | "A '-y' flag will be added for each source file's directory.\n" + 196 | "This is the preferred mechanism for specifying RTL modules.", 197 | ), 198 | "lib_files": attr.label_list( 199 | allow_files = True, 200 | doc = "Verilog library files containing multiple modules.\n" + 201 | "A '-v' flag will be added for each file in this attribute.\n" + 202 | "It is preferable to use the 'modules' attribute when possible because library files require parsing entire files to discover all modules.", 203 | ), 204 | "direct": attr.label_list( 205 | allow_files = True, 206 | doc = "Verilog files that must be put directly onto the command line.\n" + 207 | "'modules' should be used instead of 'direct' wherever possible", 208 | ), 209 | "deps": attr.label_list( 210 | doc = "Other verilog libraries this target is dependent upon.\n" + 211 | "All Labels specified here must provide a VerilogInfo provider.", 212 | ), 213 | "no_synth": attr.bool( 214 | default = False, 215 | doc = "When True, do not allow the contents of this library to be exposed to synthesis.\n" + 216 | "TODO: This is currently enforced via an Aspect which is not included in this repository.\n" + 217 | "The aspect creates a parallel set of 'synth__*.f' which have the filtered views which are passed to the synthesis tool.", 218 | ), 219 | "is_pkg": attr.bool( 220 | default = False, 221 | doc = "INTERNAL: Do not set in verilog_rtl_library instances.\n" + 222 | "Used for internal bookkeeping for macros derived from verilog_rtl_library.\n" + 223 | "Used to enforce naming conventions related to packages to encourage simple dependency graphs", 224 | ), 225 | "is_shell_of": attr.string( 226 | default = "", 227 | doc = "INTERNAL: Do not set in verilog_rtl_library instances.\n" + 228 | "Used for internal bookkeeping for macros derived from verilog_rtl_library.\n" + 229 | "If set, this library represents a 'shell' of another module.\n" + 230 | "Allows downstream test rules to specify this Label as a 'shell' to override another instance via the gumi system.", 231 | ), 232 | "enable_gumi": attr.bool( 233 | default = True, 234 | doc = "When set, create an additional file creating default preprocessor values for the gumi system.", 235 | ), 236 | "gumi_file_override": attr.label( 237 | default = None, 238 | allow_single_file = True, 239 | doc = "Allow a more elaborate default set of gumi defines by pointing to another Label or file.\n" + 240 | "Useful for creating per-instance instead of per-type overrides for modules which require additional information.", 241 | ), 242 | "gumi_override": attr.string_list( 243 | doc = "A list of strings of module names to create gumi defines.\n" + 244 | "If empty (default), the modules variable is used instead.\n" + 245 | "Useful when using 'direct' or 'lib_files' or to limit the defines created when using a glob in 'modules'", 246 | ), 247 | "makelib": attr.string( 248 | default = "", 249 | doc = "Used to specify that this RTL lib should be compiled into its own library.\n" + 250 | "The string value specified here is used as the name of 
the compile lib.", 251 | ), 252 | }, 253 | outputs = { 254 | "flist": "%{name}.f", 255 | }, 256 | ) 257 | 258 | def verilog_rtl_pkg( 259 | name, 260 | direct, 261 | no_synth = False, 262 | deps = []): 263 | """A single Systemverilog package. 264 | 265 | This rule is a specialized case of verilog_rtl_library. Systemverilog 266 | packages should be placed into their own rule instance to limit cross 267 | dependencies. In general, a block may depend on another block's package but 268 | should not need to depend on all the modules in the block. 269 | 270 | Args: 271 | name: A unique name for this target. 272 | direct: The Systemverilog file containing the package. 273 | 274 | See verilog_rtl_library::direct. 275 | no_synth: Default False. 276 | 277 | See verilog_rtl_library::no_synth. 278 | deps: Other packages this target is dependent on. 279 | 280 | See verilog_rtl_library::deps. 281 | """ 282 | verilog_rtl_library( 283 | name = name, 284 | direct = direct, 285 | deps = deps, 286 | is_pkg = True, 287 | no_synth = no_synth, 288 | enable_gumi = False, 289 | ) 290 | 291 | def verilog_rtl_shell( 292 | name, 293 | module_to_shell_name, 294 | shell_module_label, 295 | deps = []): 296 | """An RTL shell has the same ports as another module. 297 | 298 | This rule is a specialized case of verilog_rtl_library. 299 | A 'shell' is similar to a 'stub' (empty module), but a shell may contain 300 | limited functionality. Frequent uses include: 301 | * Blackboxing hierarchy that will not be the target of testing 302 | * Replacing functionality with a simpler model (e.g. simulation-only memory models) 303 | 304 | Args: 305 | name: A unique name for this target. 306 | module_to_shell_name: The name of the module that will be replaced. 307 | 308 | When a downstream test uses this 'shell', a gumi define will be created using this name. 309 | 310 | When a shell needs to be hand-edited after generation If 311 | module_to_shell_name == 'custom', then all rules regarding shells are 312 | ignored and gumi shell defines are not thrown, allowing the user great 313 | power. 314 | shell_module_label: The Label or file containing the shell. 315 | 316 | See verilog_rtl_library::no_synth. 317 | deps: Other packages this target is dependent on. 318 | 319 | In general. shells should avoid having dependencies. Exceptions include 320 | necessary packages and possible a DV model to implement functional 321 | behavior. 322 | 323 | See verilog_rtl_library::deps. 
324 | """ 325 | if not name.startswith(module_to_shell_name) and module_to_shell_name != CUSTOM_SHELL: 326 | fail("Shell name should start with the original module name: shell name='{}' original module='{}'".format(name, module_to_shell_name)) 327 | verilog_rtl_library( 328 | name = name, 329 | modules = [shell_module_label], 330 | # Intentionally do not set deps here 331 | is_shell_of = module_to_shell_name, 332 | no_synth = True, 333 | enable_gumi = False, 334 | deps = deps, 335 | ) 336 | 337 | def _verilog_rtl_unit_test_impl(ctx): 338 | trans_srcs = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_sources") 339 | srcs_list = trans_srcs.to_list() 340 | flists = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists") 341 | flists_list = flists.to_list() 342 | 343 | top = "" 344 | for dep in ctx.attr.deps: 345 | if VerilogInfo in dep and dep[VerilogInfo].last_module: 346 | top = dep[VerilogInfo].last_module.short_path 347 | top_base_name = dep[VerilogInfo].last_module.basename.split(".")[0] 348 | 349 | if top == "": 350 | fail("verilog_rtl_unit_test {} could not determine the top module from the target's dependencies".format(ctx.label)) 351 | 352 | pre_fa = [" \\"] 353 | for key, value in gather_shell_defines(ctx.attr.shells).items(): 354 | pre_fa.append(" -define {}{} \\".format(key, value)) 355 | 356 | if len(ctx.attr.pre_flist_args): 357 | pre_fa.extend(["{} \\".format(pfa) for pfa in ctx.attr.pre_flist_args]) 358 | 359 | pre_fa.append(" \\") 360 | 361 | if len(ctx.attr.post_flist_args): 362 | post_fa = "\n".join(["{} \\".format(pfa) for pfa in ctx.attr.post_flist_args]) + "\n" 363 | else: 364 | post_fa = " \\" 365 | 366 | waves_cmd = ctx.actions.declare_file(ctx.label.name + "_waves.tcl") 367 | ctx.actions.expand_template( 368 | template = ctx.file.ut_sim_waves_template, 369 | output = waves_cmd, 370 | substitutions = { 371 | "{TOP_BASE_NAME}": top_base_name, # buildifier: disable=uninitialized 372 | }, 373 | ) 374 | 375 | ctx.actions.expand_template( 376 | template = ctx.file.ut_sim_template, 377 | output = ctx.outputs.executable, 378 | substitutions = { 379 | "{SIMULATOR_COMMAND}": ctx.attr.command_override[ToolEncapsulationInfo].command, 380 | "{WAVE_VIEWER_COMMAND}": ctx.attr.wave_viewer_command[ToolEncapsulationInfo].command, 381 | "{FLISTS}": " ".join(["-f {}".format(f.short_path) for f in flists_list]), 382 | "{TOP}": top, 383 | "{PRE_FLIST_ARGS}": "\n".join(pre_fa), 384 | "{POST_FLIST_ARGS}": post_fa, 385 | "{WAVES_RENDER_CMD_PATH}": waves_cmd.short_path, 386 | }, 387 | ) 388 | 389 | runfiles = ctx.runfiles(files = flists_list + srcs_list + ctx.files.data + ctx.files.shells + [waves_cmd]) 390 | return [DefaultInfo( 391 | runfiles = runfiles, 392 | )] 393 | 394 | verilog_rtl_unit_test = rule( 395 | # TODO: this could eventually be a specific use case of verilog_test 396 | doc = """Compile and simulate a verilog_rtl_library. 397 | 398 | Allows a designer to write small unit/directed tests which can be included in regression. 399 | 400 | This rule is capable of running SVUnit regressions as well. See ut_sim_template attribute. 401 | 402 | This unit test can either immediately launch a waveform viewer, or it can render a waveform database which can be loaded separately. 403 | To launch the waveform viewer after the test completes, run the following: 'bazel run -- --launch &'. 404 | To render a database without launching a viewer, run the following: 'bazel run -- --waves'. 
405 | Any other unknown options will be passed directly to the simulator, for example: 'bazel run <target> -- --waves +my_arg=4'. 406 | 407 | Typically, an additional verilog_rtl_library containing 'unit_test_top.sv' 408 | is created. This unit_test_top will be dependent on the DUT top, and will 409 | be the only entry in the `deps` attribute list provided to verilog_rtl_unit_test. 410 | """, 411 | implementation = _verilog_rtl_unit_test_impl, 412 | attrs = { 413 | "deps": attr.label_list( 414 | mandatory = True, 415 | doc = "Other verilog libraries this target is dependent upon.\n" + 416 | "All Labels specified here must provide a VerilogInfo provider.", 417 | ), 418 | "ut_sim_template": attr.label( 419 | allow_single_file = True, 420 | default = Label("@rules_verilog//vendors/cadence:verilog_rtl_unit_test.sh.template"), 421 | doc = "The template to generate the script to run the test.\n" + 422 | "Also available is a [SVUnit](http://agilesoc.com/open-source-projects/svunit/) test template: @rules_verilog//vendors/cadence:verilog_rtl_unit_test_svunit.sh.template\n" + 423 | "If using the SVUnit template, you may also want to throw:\n" + 424 | "```\n" + 425 | " post_flist_args = [\n" + 426 | " \"--directory \",\n" + 427 | " ],\n" + 428 | "```", 429 | ), 430 | "ut_sim_waves_template": attr.label( 431 | allow_single_file = True, 432 | default = Label("@rules_verilog//vendors/cadence:verilog_rtl_unit_test_waves.tcl.template"), 433 | doc = "The template to generate the waves command script to run in the test.\n" + 434 | "When using the SVUnit ut_sim_template or a custom SVUnit invocation, the default verilog_rtl_unit_test_waves.tcl.template will not work. " + 435 | "You must either write your own waves script or use the SVUnit waves template: " + 436 | "@rules_verilog//vendors/cadence:verilog_rtl_unit_test_svunit_waves.tcl.template\n", 437 | ), 438 | "command_override": attr.label( 439 | default = Label("@rules_verilog//:verilog_rtl_unit_test_command"), 440 | doc = "Allows custom override of simulator command in the event of wrapping via modulefiles.\n" + 441 | "Example override in project's .bazelrc:\n" + 442 | ' build --@rules_verilog//:verilog_rtl_unit_test_command="runmod -t xrun --"', 443 | ), 444 | "wave_viewer_command": attr.label( 445 | default = Label("@rules_verilog//:verilog_rtl_wave_viewer_command"), 446 | doc = "Allows custom override of waveform viewer command in the event of wrapping via modulefiles.\n" + 447 | "Example override in project's .bazelrc:\n" + 448 | ' build --@rules_verilog//:verilog_rtl_wave_viewer_command="runmod -t simvision --"', 449 | ), 450 | "data": attr.label_list( 451 | allow_files = True, 452 | doc = "Non-verilog dependencies. Useful when reading in data files as stimulus/prediction.", 453 | ), 454 | "shells": attr.label_list( 455 | doc = _SHELLS_DOC, 456 | ), 457 | "pre_flist_args": attr.string_list( 458 | doc = "Additional command line arguments to be placed after the simulator binary but before the flist arguments.\n" + 459 | "See ut_sim_template attribute for exact layout.\n" 
+ 460 | "For defines to have effect, they must be declared in pre_flist_args not post_flist_args.", 461 | ), 462 | "post_flist_args": attr.string_list( 463 | doc = "Additional command line arguments to be placed after the flist arguments\n" + 464 | "See ut_sim_template attribute for exact layout.", 465 | ), 466 | }, 467 | test = True, 468 | ) 469 | 470 | def _verilog_rtl_lint_test_impl(ctx): 471 | trans_flists = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists", allow_other_outputs = False) 472 | 473 | # This is a workaround for an issue with using -define in Ascent and will be removed once the Ascent issue is fixed 474 | # See github issue #24 475 | shell_defines_string = "-define {}{}" 476 | attr_defines_string = "-define {}{}" 477 | if str(ctx.attr.run_template.label) == "@rules_verilog//vendors/real_intent:verilog_rtl_lint_test.sh.template": 478 | shell_defines_string = "+define+{}{}" 479 | attr_defines_string = "+define+{}{}" 480 | 481 | defines = [shell_defines_string.format("LINT", "")] 482 | defines.extend([shell_defines_string.format(key, value) for key, value in gather_shell_defines(ctx.attr.shells).items()]) 483 | defines.extend([attr_defines_string.format(key, value) for key, value in ctx.attr.defines.items()]) 484 | 485 | top_path = "" 486 | for dep in ctx.attr.deps: 487 | if VerilogInfo in dep and dep[VerilogInfo].last_module: 488 | top_path = dep[VerilogInfo].last_module.short_path 489 | 490 | if top_path == "": 491 | fail("verilog_rtl_lint_test {} could not determine the top module from the target's dependencies".format(ctx.label())) 492 | 493 | if len(ctx.files.rulefile) > 1: 494 | fail("Only one rulefile allowed, but {} has several rulefiles".format(ctx.label)) 495 | 496 | ctx.actions.expand_template( 497 | template = ctx.file.command_template, 498 | output = ctx.outputs.command_script, 499 | substitutions = { 500 | "{RULEFILE}": "".join([f.short_path for f in ctx.files.rulefile]), 501 | "{DEFINES}": " ".join(defines), 502 | "{FLISTS}": " ".join(["-f {}".format(f.short_path) for f in trans_flists.to_list()]), 503 | "{TOP_PATH}": top_path, 504 | "{INST_TOP}": ctx.attr.top, 505 | "{LINT_PARSER}": ctx.files.lint_parser[0].short_path, 506 | }, 507 | ) 508 | 509 | ctx.actions.expand_template( 510 | template = ctx.file.run_template, 511 | output = ctx.outputs.executable, 512 | substitutions = { 513 | "{SIMULATOR_COMMAND}": ctx.attr._command_override[ToolEncapsulationInfo].command, 514 | "{COMMAND_SCRIPT}": ctx.outputs.command_script.short_path, 515 | "{DEFINES}": " ".join(defines), 516 | "{FLISTS}": " ".join(["-f {}".format(f.short_path) for f in trans_flists.to_list()]), 517 | "{TOP_PATH}": top_path, 518 | "{DESIGN_INFO}": " ".join(["{}".format(design_info.short_path) for design_info in ctx.files.design_info]), 519 | "{RULEFILE}": "".join([f.short_path for f in ctx.files.rulefile]), 520 | "{INST_TOP}": ctx.attr.top, 521 | "{LINT_PARSER}": ctx.files.lint_parser[0].short_path, 522 | "{LINT_PARSER_LIB}": ctx.files._lint_parser_lib[0].dirname, 523 | "{WAIVER_DIRECT}": ctx.attr.waiver_direct, 524 | }, 525 | ) 526 | 527 | trans_flists = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists", allow_other_outputs = False) 528 | trans_srcs = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_sources", allow_other_outputs = True) 529 | 530 | runfiles = ctx.runfiles(files = trans_srcs.to_list() + trans_flists.to_list() + ctx.files.design_info + ctx.files.rulefile + 
531 | 
532 |     return [
533 |         DefaultInfo(runfiles = runfiles),
534 |     ]
535 | 
536 | verilog_rtl_lint_test = rule(
537 |     doc = """Compile and run lint on a target.
538 | 
539 | This rule was originally written for Cadence HAL to be run under Xcelium. As such, it
540 | is not entirely generic. It also uses a log post-processor
541 | (passed in by the lint_parser attribute) to allow for easier waiving of warnings.
542 | 
543 | The DUT must have no unwaived warnings/errors in order for this rule to
544 | pass. The intended philosophy is for blocks to maintain a clean lint status
545 | throughout the lifecycle of the project, not to run lint as a checklist
546 | item towards the end of the project.
547 | 
548 | There are several attributes in this rule that must be kept in sync:
549 | run_template, rulefile, lint_parser, and command_template must use the associated
550 | files for each vendor. The default values all point to the Cadence HAL versions.
551 | If an instance of this rule overrides any of these values, it must override all four.
552 | 
553 | """,
554 |     implementation = _verilog_rtl_lint_test_impl,
555 |     attrs = {
556 |         "deps": attr.label_list(
557 |             mandatory = True,
558 |             doc = "Other verilog libraries this target is dependent upon.\n" +
559 |                   "All Labels specified here must provide a VerilogInfo provider.",
560 |         ),
561 |         "run_template": attr.label(
562 |             allow_single_file = True,
563 |             default = Label("@rules_verilog//vendors/cadence:verilog_rtl_lint_test.sh.template"),
564 |             doc = "The template to generate the script to run the lint test.\n" +
565 |                   "The run templates are located at " +
566 |                   "@rules_verilog//vendors/<vendor>:verilog_rtl_lint_test.sh.template\n",
567 |         ),
568 |         "rulefile": attr.label(
569 |             allow_single_file = True,
570 |             mandatory = True,
571 |             doc = "The rules configuration file for this lint run. rules_verilog doesn't provide a reference rulefile; " +
572 |                   "each project that uses rules_verilog must write its own tool-specific rulefile.\n" +
573 |                   "Example HAL rulefile: https://github.com/freecores/t6507lp/blob/ca7d7ea779082900699310db459a544133fe258a/lint/run/hal.def",
574 |         ),
575 |         "shells": attr.label_list(
576 |             doc = _SHELLS_DOC,
577 |         ),
578 |         "top": attr.string(
579 |             doc = "The name of the top-level module for this lint run",
580 |             mandatory = True,
581 |         ),
582 |         "design_info": attr.label_list(
583 |             allow_files = True,
584 |             doc = "A Cadence design_info file to add additional lint rules/waivers",
585 |         ),
586 |         "defines": attr.string_dict(
587 |             allow_empty = True,
588 |             doc = "Dictionary of additional \\`defines for this lint run.\nLINT is always defined by default.\n" +
589 |                   "If a define is only for control and has no value, " +
590 |                   "e.g. \\`define USE_AXI, the dictionary entry key should be \"USE_AXI\" and the value should be the empty string.\n" +
591 |                   "If a define needs a value, e.g. \\`define WIDTH 8, the dictionary value must start with '=', e.g. '=8'",
592 |         ),
593 |         "lint_parser": attr.label(
594 |             allow_files = True,
595 |             default = "@rules_verilog//bin:lint_parser_hal",
596 |             doc = "Post processor for lint logs allowing for easier waiving of warnings.\n" +
597 |                   "Parsers for HAL and Ascent are included in the rules_verilog release at " +
598 |                   "@rules_verilog//bin:lint_parser_(hal|ascent)",
599 |         ),
600 |         "waiver_direct": attr.string(
601 |             doc = "Lint waiver python regex to apply directly to a lint message. This is sometimes needed to work around cases when HAL has formatting errors in xrun.log.xml that cause problems for the lint parser",
602 |         ),
603 |         "command_template": attr.label(
604 |             allow_single_file = True,
605 |             default = Label("@rules_verilog//vendors/cadence:verilog_rtl_lint_cmds.tcl.template"),
606 |             doc = "The template to generate the command script for this lint test.\n" +
607 |                   "The command templates are located at " +
608 |                   "@rules_verilog//vendors/<vendor>:verilog_rtl_lint_cmds.tcl.template\n",
609 |         ),
610 |         "_command_override": attr.label(
611 |             default = Label("@rules_verilog//:verilog_rtl_lint_test_command"),
612 |             doc = "Allows custom override of the simulator command in the event of wrapping via modulefiles.\n" +
613 |                   "Example override in project's .bazelrc:\n" +
614 |                   ' build --@rules_verilog//:verilog_rtl_lint_test_command="runmod -t xrun --"',
615 |         ),
616 |         "_lint_parser_lib": attr.label(
617 |             allow_single_file = True,
618 |             default = "@rules_verilog//lib:cmn_logging",
619 |             doc = "Python library dir needed by the lint parser script.\n" +
620 |                   "Using a private attribute instead of something cleaner\n" +
621 |                   "because I cannot find a way to create File objects\n" +
622 |                   "from Label objects to be used with ctx.runfiles",
623 |         ),
624 |     },
625 |     outputs = {
626 |         "command_script": "%{name}_cmds.tcl",
627 |     },
628 |     test = True,
629 | )
630 | 
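A corresponding BUILD sketch for a lint target, illustrating the defines dictionary semantics described above. Names are hypothetical, the load path is assumed, and the rulefile is project-specific since rules_verilog ships no reference copy:

```skylark
load("@rules_verilog//verilog:defs.bzl", "verilog_rtl_lint_test")

verilog_rtl_lint_test(
    name = "counter_lint",
    deps = [":counter"],
    rulefile = "hal_rules.def",  # hypothetical project-specific rulefile
    top = "counter",
    defines = {
        "USE_AXI": "",  # control define with no value: empty string
        "WIDTH": "=8",  # valued define: the value must start with '='
    },
)
```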
631 | def _verilog_rtl_cdc_test_impl(ctx):
632 |     trans_flists = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_flists", allow_other_outputs = False)
633 |     trans_srcs = get_transitive_srcs([], ctx.attr.shells + ctx.attr.deps, VerilogInfo, "transitive_sources", allow_other_outputs = True)
634 | 
635 |     # The run script is simple; the tcl command file has the interesting stuff
636 |     ctx.actions.expand_template(
637 |         template = ctx.file.bash_template,
638 |         output = ctx.outputs.executable,
639 |         substitutions = {
640 |             "{CDC_COMMAND}": ctx.attr._command_override[ToolEncapsulationInfo].command,
641 |             "{PREAMBLE_CMDS}": ctx.outputs.preamble_cmds.short_path,
642 |             "{CMD_FILES}": " ".join([cmd_file.short_path for cmd_file in ctx.files.cmd_files]),
643 |             "{EPILOGUE_CMDS}": ctx.outputs.epilogue_cmds.short_path,
644 |         },
645 |     )
646 | 
647 |     defines = ["+define+LINT+CDC"]
648 | 
649 |     defines.extend(["+{}{}".format(key, value) for key, value in ctx.attr.defines.items()])
650 |     for key, value in gather_shell_defines(ctx.attr.shells).items():
651 |         defines.append("+{}{}".format(key, value))
652 | 
653 |     top_path = ""
654 |     for dep in ctx.attr.deps:
655 |         if VerilogInfo in dep and dep[VerilogInfo].last_module:
656 |             top_path = " {}".format(dep[VerilogInfo].last_module.short_path)
657 |     if top_path == "":
658 |         fail("verilog_rtl_cdc_test {} could not determine the top module from the target's dependencies".format(ctx.label))
659 | 
660 |     bbox_modules_cmd = ""
661 |     if ctx.attr.bbox_modules:
662 |         bbox_modules_cmd = "-bbox_m {" + "{}".format(" ".join(ctx.attr.bbox_modules)) + "}"
663 | 
664 |     bbox_array_size_cmd = ""
665 |     if ctx.attr.bbox_array_size < 0:
666 |         fail("verilog_rtl_cdc_test {} was specified with a negative bbox_array_size".format(ctx.label))
667 |     elif ctx.attr.bbox_array_size > 0:
668 |         bbox_array_size_cmd = "-bbox_a {}".format(ctx.attr.bbox_array_size)
669 | 
670 |     ctx.actions.expand_template(
671 |         template = ctx.file.preamble_template,
672 |         output = ctx.outputs.preamble_cmds,
673 |         substitutions = {
674 |             "{DEFINES}": "".join(defines),
675 |             "{FLISTS}": " ".join(["-f {}".format(f.short_path) for f in trans_flists.to_list()]),
676 |             "{TOP_PATH}": top_path,
677 |             "{INST_TOP}": ctx.attr.top,
678 |             "{BBOX_MODULES_CMD}": bbox_modules_cmd,
679 |             "{BBOX_ARRAY_SIZE_CMD}": bbox_array_size_cmd,
680 |         },
681 |     )
682 | 
683 |     ctx.actions.expand_template(
684 |         template = ctx.file.epilogue_template,
685 |         output = ctx.outputs.epilogue_cmds,
686 |         substitutions = {},
687 |     )
688 | 
689 |     runfiles = ctx.runfiles(files = [ctx.outputs.preamble_cmds, ctx.outputs.epilogue_cmds] + trans_srcs.to_list() + trans_flists.to_list() + ctx.files.cmd_files)
690 | 
691 |     return [
692 |         DefaultInfo(runfiles = runfiles),
693 |     ]
694 | 
695 | verilog_rtl_cdc_test = rule(
696 |     doc = "Run JasperGold CDC on a verilog_rtl_library.",
697 |     implementation = _verilog_rtl_cdc_test_impl,
698 |     attrs = {
699 |         "deps": attr.label_list(
700 |             mandatory = True,
701 |             doc = "Other verilog libraries this target is dependent upon.\n" +
702 |                   "All Labels specified here must provide a VerilogInfo provider.",
703 |         ),
704 |         "run_template": attr.label(
705 |             allow_single_file = True,
706 |             default = Label("@rules_verilog//vendors/cadence:verilog_rtl_cdc_test.sh.template"),
707 |             doc = "The template to generate the script to run the cdc test.\n",
708 |         ),
709 |         "preamble_template": attr.label(
710 |             allow_single_file = True,
711 |             default = Label("@rules_verilog//vendors/cadence:verilog_rtl_cdc_preamble_cmds.tcl.template"),
712 |             doc = "The template to generate the initial commands (the preamble) for this cdc test.\n",
713 |         ),
714 |         "epilogue_template": attr.label(
715 |             allow_single_file = True,
716 |             default = Label("@rules_verilog//vendors/cadence:verilog_rtl_cdc_epilogue_cmds.tcl.template"),
717 |             doc = "The template to generate the final reporting commands for this cdc test.\n",
718 |         ),
719 |         "shells": attr.label_list(
720 |             doc = _SHELLS_DOC,
721 |         ),
722 |         "top": attr.string(
723 |             doc = "The name of the top-level module for this cdc run",
724 |             mandatory = True,
725 |         ),
726 |         "defines": attr.string_dict(
727 |             allow_empty = True,
728 |             doc = "Dictionary of additional \\`defines for this cdc run.\nLINT and CDC are always defined.\n" +
729 |                   "If a define is only for control and has no value, " +
730 |                   "e.g. \\`define USE_AXI, the dictionary entry key should be \"USE_AXI\" and the value should be the empty string.\n" +
731 |                   "If a define needs a value, e.g. \\`define WIDTH 8, the dictionary value must start with '=', e.g. '=8'",
732 |         ),
733 |         "bbox_modules": attr.string_list(
734 |             allow_empty = True,
735 |             default = [],
736 |             doc = "List of modules to black box",
737 |         ),
738 |         "bbox_array_size": attr.int(
739 |             default = 0,
740 |             doc = "Black box any RTL array greater than the specified size. If the value of this attribute is 0, the CDC tool will use the default size",
741 |         ),
742 |         "cmd_files": attr.label_list(
743 |             allow_files = True,
744 |             doc = "A list of tcl files containing commands to run. Multiple files are allowed to facilitate separating common project commands and block-specific commands.",
745 |             mandatory = True,
746 |         ),
747 |         "bash_template": attr.label(
748 |             allow_single_file = True,
749 |             default = Label("//vendors/cadence:verilog_rtl_cdc_test.sh.template"),
750 |             doc = "The template for the generated bash script which will run the case.",
751 |         ),
752 |         "_command_override": attr.label(
753 |             default = Label("@rules_verilog//:verilog_rtl_cdc_test_command"),
754 |             doc = "Allows custom override of the simulator command in the event of wrapping via modulefiles.\n" +
755 |                   "Example override in project's .bazelrc:\n" +
756 |                   ' build --@rules_verilog//:verilog_rtl_cdc_test_command="runmod -t jg --"',
757 |         ),
758 |     },
759 |     outputs = {
760 |         "preamble_cmds": "%{name}_preamble_cmds.tcl",
761 |         "epilogue_cmds": "%{name}_epilogue_cmds.tcl",
762 |     },
763 |     test = True,
764 | )
765 | 
--------------------------------------------------------------------------------
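A BUILD sketch for a CDC target follows the same pattern. Names and tcl files are hypothetical, and the load path is assumed:

```skylark
load("@rules_verilog//verilog:defs.bzl", "verilog_rtl_cdc_test")

verilog_rtl_cdc_test(
    name = "counter_cdc",
    deps = [":counter"],
    top = "counter",
    # Common project commands plus block-specific commands (hypothetical files).
    cmd_files = ["cdc_common.tcl", "counter_cdc.tcl"],
    bbox_modules = ["analog_macro"],  # hypothetical module to black box
)
```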
/verilog/private/verilog.bzl:
--------------------------------------------------------------------------------
1 | """Generic functions for gathering verilog files."""
2 | 
3 | CUSTOM_SHELL = "custom"
4 | 
5 | _SHELLS_DOC = """List of verilog_rtl_shell Labels.
6 | For each Label, a gumi define will be placed on the command line to use this shell instead of the original module.
7 | This requires that the original module was instantiated using \\`gumi_<module_name> instead of just <module_name>."""
8 | 
9 | VerilogInfo = provider(fields = {
10 |     "transitive_sources": "All source files needed by a target. This flow is not currently set up to do partitioned compile, so all files need to be carried through to the final step for compilation as a whole.",
11 |     "transitive_flists": "All flists which specify ordering of transitive sources.",
12 |     "transitive_dpi": "Shared libraries (only .so extension allowed) to link in via the DPI for testbenches.",
13 |     "last_module": "This is a convenience accessor. The last module specified is assumed to be the top module in a design. This is frequently needed by downstream tools.",
14 | })
15 | 
16 | ShellInfo = provider(fields = {
17 |     "is_pkg": "Indicates if this verilog_rtl_library used the verilog_rtl_pkg rule. Additional restrictions are imposed on packages to encourage a clean dependency tree.",
18 |     "is_shell_of": "If non-empty, indicates this verilog_rtl_library represents a shell of another module",
19 |     "gumi_path": "The bazel short_path to a gumi file. Used when generating a verilog_rtl_library's associated flist.",
20 | })
21 | 
22 | ToolEncapsulationInfo = provider(fields = {
23 |     "command": "The command invocation for a particular tool. Useful for aliases, redirection, and wrappers.",
24 | })
25 | 
26 | def _toolencapsulation_impl(ctx):
27 |     return ToolEncapsulationInfo(command = ctx.build_setting_value)
28 | 
29 | verilog_tool_encapsulation = rule(
30 |     implementation = _toolencapsulation_impl,
31 |     build_setting = config.string(flag = True),
32 | )
33 | 
34 | def gather_shell_defines(shells):
35 |     defines = {}
36 |     for shell in shells:
37 |         if ShellInfo not in shell:
38 |             fail("Not a shell: {}".format(shell))
39 |         if not shell[ShellInfo].is_shell_of:
40 |             fail("Not a shell: {}".format(shell))
41 |         if shell[ShellInfo].is_shell_of == CUSTOM_SHELL:
42 |             # Don't create a shell define for this shell because it has custom setup
43 |             # Usually used when control over per-instance shells is desired
44 |             continue
45 | 
46 |         # implied from label name. this could be more explicit
47 |         defines["gumi_" + shell[ShellInfo].is_shell_of] = "={}".format(shell.label.name)
48 |         defines["gumi_use_{}".format(shell.label.name)] = ""
49 |     return defines
50 | 
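To make the gumi define mapping concrete — a minimal sketch, assuming a hypothetical shell target `counter_shell` whose ShellInfo.is_shell_of is "counter":

```skylark
# gather_shell_defines([counter_shell]) would return:
# {
#     "gumi_counter": "=counter_shell",  # `gumi_counter instantiations now elaborate counter_shell
#     "gumi_use_counter_shell": "",      # value-less marker define
# }
# On the simulator command line these become, e.g.:
#   -define gumi_counter=counter_shell -define gumi_use_counter_shell
```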
"data": attr.label_list( 147 | allow_files = True, 148 | doc = "Non-verilog dependencies", 149 | ), 150 | "tool": attr.label(doc = "Label to a single tool to run. Inserted at before pre_flist_args if set. Do not duplicate in pre_flist_args"), 151 | }, 152 | outputs = {"out": "%{name}_run.sh"}, 153 | test = True, 154 | ) 155 | --------------------------------------------------------------------------------