├── .gitignore ├── .gitmodules ├── ASP-DAC-24-Tutorial-CircuitOps-OpenROAD-v4.pptx ├── Dockerfile ├── LICENSE ├── README.md ├── designs ├── gcd.def ├── gcd.sdc.gz ├── gcd.spef.gz ├── gcd.v └── pid_0.65.v ├── platforms ├── lef │ ├── NangateOpenCellLibrary.macro.lef │ ├── NangateOpenCellLibrary.macro.mod.lef │ ├── NangateOpenCellLibrary.macro.rect.lef │ └── NangateOpenCellLibrary.tech.lef ├── lib │ └── NangateOpenCellLibrary_typical.lib └── preprocessed_cell_dictionary.json ├── session1 ├── README.md ├── demo1_flow.py ├── demo1_helpers.py ├── demo1_query.py ├── demo2_IR.py ├── demo2_IR_helpers.py ├── demo2_gate_sizing.py └── demo2_gate_sizing_helpers.py └── session2 ├── demo3_LPG_query_example.py ├── demo4_preroute_net_delay_prediction.py └── demo4_preroute_net_delay_prediction_helpers.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .*.sw[pno] 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "OpenROAD"] 2 | path = OpenROAD 3 | url = https://github.com/The-OpenROAD-Project/OpenROAD.git 4 | [submodule "CircuitOps"] 5 | path = CircuitOps 6 | url = https://github.com/NVlabs/CircuitOps.git 7 | -------------------------------------------------------------------------------- /ASP-DAC-24-Tutorial-CircuitOps-OpenROAD-v4.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ASU-VDA-Lab/ASP-DAC24-Tutorial/7d25bfab195d95d8f41a0df1567d98a8578aa788/ASP-DAC-24-Tutorial-CircuitOps-OpenROAD-v4.pptx -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | WORKDIR /src 4 | ARG DEBIAN_FRONTEND=noninteractive 5 | RUN apt-get update && apt-get install -y gnupg2 
ca-certificates 6 | 7 | RUN echo "deb [trusted=yes] https://downloads.skewed.de/apt jammy main" >> /etc/apt/sources.list 8 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-key 612DEFB798507F25 9 | RUN apt-get update 10 | 11 | RUN apt-get install -y git 12 | RUN apt-get install -y gcc g++ 13 | RUN apt-get install -y libpython-all-dev 14 | RUN apt-get install -y libboost-all-dev 15 | RUN apt-get install -y libcairo2 16 | RUN apt-get install -y libcairo2-dev 17 | RUN apt-get install -y python3-matplotlib 18 | RUN apt-get install -y nvidia-cuda-toolkit 19 | RUN apt-get update 20 | RUN apt-get install -y python3-graph-tool 21 | 22 | RUN apt-get install -y vim 23 | RUN apt-get install -y python3-pip 24 | 25 | RUN pip install --no-cache-dir torch==2.2.0 26 | RUN pip install dgl==2.1.0 27 | RUN pip install pycairo 28 | RUN pip install pandas 29 | RUN pip install scikit-learn 30 | RUN pip install numpy==1.24.4 31 | RUN pip install pydantic 32 | 33 | WORKDIR /app 34 | RUN git clone --recursive https://github.com/ASU-VDA-Lab/ASP-DAC24-Tutorial.git 35 | 36 | WORKDIR /app/ASP-DAC24-Tutorial/OpenROAD/ 37 | RUN ./etc/DependencyInstaller.sh 38 | RUN mkdir build 39 | WORKDIR /app/ASP-DAC24-Tutorial/OpenROAD/build 40 | RUN cmake .. 41 | RUN make -j 42 | 43 | WORKDIR /app 44 | 45 | 46 | COPY . /app 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, ASU-VDA-Lab 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ASP-DAC24 Tutorial-8: CircuitOps and OpenROAD: Unleashing ML EDA for Research and Education 2 | This is a GitHub repository that has the scripts being demoed at the tutorial. The scripts highlight example use cases of ML EDA infrastructure. 3 | 4 | ## Background 5 | Over the last decade, there has been a significant rise in machine learning (ML)--based electronic design automation (ML-EDA) research. However, a notable challenge lies in the interaction between existing EDA tools and ML frameworks. 
Researchers often use TCL scripts for interaction with EDA tools and Python scripts for ML frameworks. They rely on file I/O for interaction between the EDA world and ML world or reinvent the wheel and implement the complete EDA tools in Python. Both these approaches have challenges. The first is extremely slow, and it's impossible to send data/information between EDA tools iteratively, which is a barrier to entry for non-chip designers. The second slows down research and makes it challenging to make apples-to-apples comparisons between various ML EDA algorithms. As a remedy, there is a pressing need for an ML-friendly EDA tool that seamlessly integrates with ML methodologies, reducing reliance on TCL programming and file-based information extraction. 6 | 7 | This tutorial introduces the CircuitOps and OpenROAD Python APIs, representing a significant milestone in ML-EDA research. CircuitOps is an ML-friendly data infrastructure utilizing labeled property graphs (LPGs) backed by relational tables (IR tables) to generate datasets for ML-EDA applications. The Python-compatible LPG minimizes the developmental effort required for ML-EDA research. The IR tables are generated using OpenROAD Python APIs, which offer numerous advantages over TCL APIs. One key advantage is that OpenROAD can now be directly imported into a Python environment, this not only means we can incorporate OpenROAD with other Python libraries, but we can also interact with OpenROAD in real-time, providing unprecedented flexibility in ML-EDA application development. Additionally, we can get the information directly via the Python API without file IO, which increases ML-EDA performance. 8 | 9 | ## Tutorial Contents 10 | 11 | This tutorial is composed of two hands-on sessions, each with two demos: 12 | - Session 1 : 13 | - Demo1: Introduce the OpenROAD Python APIs for an EDA flow 14 | - Demo2: Two ML-EDA examples powered by the Python APIs. 
15 | - Image-based static IR Drop prediction using OpenROAD Python APIs (similar to 2023 ICCAD Contest Problem C) 16 | - RL-based sizing using OpenROAD Python APIs 17 | - Session 2 : 18 | - Demo3: Introduce CircuitOps's LPG generation and query and interaction with OpenROAD 19 | - Demo4: Use of CircuitOps to generate data for stage delay prediction 20 | 21 | ## Getting Started 22 | 23 | 24 | ### Clone the repository 25 | The Google Cloud Computing resources are not available after the tutorial session at 2024 ASP-DAC. For all users, please clone this repository, install the required software dependencies, and build OpenROAD and CircuitOps. 26 | 27 | ``` 28 | git clone --recursive https://github.com/ASU-VDA-Lab/ASP-DAC24-Tutorial 29 | ``` 30 | 31 | #### Build OpenROAD and CircuitOps 32 | 33 | ##### Option 1: Build using Docker 34 | The following technique assumes you have docker installed on your machine. If you do not have it, then install docker from [here](https://docs.docker.com/engine/install/). Build the docker image and run it using the following commands: 35 | ``` 36 | docker build -t <image_name> . 37 | docker run -it --name <container_name> <image_name> 38 | ``` 39 | 40 | ##### Option 2: Build locally 41 | The following technique assumes you have a machine with the required Ubuntu OS prerequisites of OpenROAD and CircuitOps.
42 | 43 | Install dependencies for OpenROAD: 44 | ``` 45 | sudo ./OpenROAD/etc/DependencyInstaller.sh 46 | ``` 47 | 48 | Install dependencies for CircuitOps and ML EDA applications: 49 | ``` 50 | sudo apt-get install -y python3-matplotlib 51 | sudo apt-get install -y nvidia-cuda-toolkit 52 | sudo apt-get update 53 | sudo apt-get install -y python3-graph-tool 54 | sudo apt-get update && sudo apt-get install -y gnupg2 ca-certificates 55 | sudo apt-get install -y python3-pip 56 | pip3 install torch 57 | pip3 install dgl 58 | pip3 install pycairo 59 | pip3 install pandas 60 | pip3 install scikit-learn 61 | ``` 62 | 63 | Once packages have been installed, build OpenROAD: 64 | 65 | ``` 66 | cd ./OpenROAD/ 67 | mkdir build 68 | cd build 69 | cmake .. 70 | make -j 71 | ``` 72 | 73 | ## Running the Demo Scripts 74 | The following scripts assume that we work from the ASP-DAC24-Tutorial directory. 75 | Before testing the scripts, please change to the tutorial directory. 76 | 77 | ``` 78 | cd <path_to_ASP-DAC24-Tutorial> 79 | ``` 80 | 81 | ### Session 1 82 | 83 | This session demonstrates OpenROAD Python APIs in ML-EDA applications. The session contains two demos: 84 | 85 | 86 | #### Demo 1 87 | 88 | The first demo shows the Python APIs to run EDA tools flow and APIs to query the OpenROAD database. These APIs can work within a regular Python shell and easily integrate with ML-EDA applications. 89 | 90 | ``` 91 | ./OpenROAD/build/src/openroad -python session1/demo1_flow.py 92 | ``` 93 | 94 | OpenROAD Python APIs circuit properties query example: 95 | 96 | ``` 97 | ./OpenROAD/build/src/openroad -python session1/demo1_query.py 98 | ``` 99 | 100 | #### Demo 2 101 | 102 | The second demo shows the Python APIs used in two ML EDA applications. The first is for IR drop prediction. The script creates a power map within the OpenROAD Python interpreter and performs inference using a U-Net model. The second shows the use of Python APIs with an RL gate sizing framework.
The GNN takes an action to update the size of the netlist in the OpenROAD database (OpenDB) and uses the OpenROAD timer (OpenSTA) to estimate the reward. 103 | 104 | 105 | To run the image-based static IR Drop prediction using OpenROAD Python: 106 | 107 | ``` 108 | ./OpenROAD/build/src/openroad -python session1/demo2_IR.py 109 | ``` 110 | 111 | To run the RL-based gate sizing example using OpenROAD Python: 112 | 113 | ``` 114 | ./OpenROAD/build/src/openroad -python session1/demo2_gate_sizing.py 115 | ``` 116 | 117 | ### Session 2 118 | 119 | This session demonstrates the CircuitOps data structure, which is an Intermediate Representation (IR) and includes a labeled property graph (LPG) backed by relational tables. 120 | 121 | 122 | #### Demo 3 123 | The first demo in this session highlights LPG creation and IR table generation using OpenROAD, and it is followed by examples of using Python APIs for querying the LPG. 124 | 125 | 126 | IR Tables generation using OpenROAD: 127 | 128 | ``` 129 | cd CircuitOps 130 | ../OpenROAD/build/src/openroad -python ./src/python/generate_tables.py 131 | cd ../ 132 | ``` 133 | 134 | There are two ways to create LPGs: 135 | 136 | (1) Create LPG via IR Tables 137 | 138 | ``` 139 | python3 session2/demo3_LPG_query_example.py 140 | ``` 141 | 142 | (2) Create LPG via OpenROAD Python API 143 | ``` 144 | ./OpenROAD/build/src/openroad -python session2/demo3_LPG_query_example.py --use_pd 145 | ``` 146 | 147 | #### Demo 4 148 | 149 | The second demo in this session showcases an example of using the CircuitOps data representation format for easy data generation for net delay prediction. 150 | 151 | To run this example: 152 | 153 | ``` 154 | ./OpenROAD/build/src/openroad -python session2/demo4_preroute_net_delay_prediction.py 155 | ``` 156 | 157 | 158 | 159 | ## Cite this work 160 | ``` 161 | \bibitem{aspdactutorialpaper} 162 | V. A. Chhabria, W. Jiang, A. B. Kahng, R. Liang, H. Ren, S. S. Sapatnekar and B.-Y.
Wu, 163 | ``OpenROAD and CircuitOps: Infrastructure for ML EDA Research and Education'', {\em Proc. VTS}, 2024, pp. 1-4. 164 | ``` 165 | -------------------------------------------------------------------------------- /designs/gcd.sdc.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ASU-VDA-Lab/ASP-DAC24-Tutorial/7d25bfab195d95d8f41a0df1567d98a8578aa788/designs/gcd.sdc.gz -------------------------------------------------------------------------------- /designs/gcd.spef.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ASU-VDA-Lab/ASP-DAC24-Tutorial/7d25bfab195d95d8f41a0df1567d98a8578aa788/designs/gcd.spef.gz -------------------------------------------------------------------------------- /platforms/lef/NangateOpenCellLibrary.tech.lef: -------------------------------------------------------------------------------- 1 | # 2 | # ****************************************************************************** 3 | # * * 4 | # * Copyright (C) 2004-2010, Nangate Inc. * 5 | # * All rights reserved. * 6 | # * * 7 | # * Nangate and the Nangate logo are trademarks of Nangate Inc. * 8 | # * * 9 | # * All trademarks, logos, software marks, and trade names (collectively the * 10 | # * "Marks") in this program are proprietary to Nangate or other respective * 11 | # * owners that have granted Nangate the right and license to use such Marks. * 12 | # * You are not permitted to use the Marks without the prior written consent * 13 | # * of Nangate or such third party that may own the Marks. * 14 | # * * 15 | # * This file has been provided pursuant to a License Agreement containing * 16 | # * restrictions on its use. This file contains valuable trade secrets and * 17 | # * proprietary information of Nangate Inc., and is protected by U.S. and * 18 | # * international laws and/or treaties. 
* 19 | # * * 20 | # * The copyright notice(s) in this file does not indicate actual or intended * 21 | # * publication of this file. * 22 | # * * 23 | # * NGLibraryCreator, v2010.08-HR32-SP3-2010-08-05 - build 1009061800 * 24 | # * * 25 | # ****************************************************************************** 26 | # 27 | # 28 | # Running on brazil06.nangate.com.br for user Giancarlo Franciscatto (gfr). 29 | # Local time is now Fri, 3 Dec 2010, 19:32:18. 30 | # Main process id is 27821. 31 | 32 | VERSION 5.6 ; 33 | BUSBITCHARS "[]" ; 34 | DIVIDERCHAR "/" ; 35 | 36 | UNITS 37 | DATABASE MICRONS 2000 ; 38 | END UNITS 39 | 40 | MANUFACTURINGGRID 0.0050 ; 41 | 42 | LAYER poly 43 | TYPE MASTERSLICE ; 44 | END poly 45 | 46 | LAYER active 47 | TYPE MASTERSLICE ; 48 | END active 49 | 50 | LAYER metal1 51 | TYPE ROUTING ; 52 | SPACING 0.065 ; 53 | WIDTH 0.07 ; 54 | PITCH 0.14 ; 55 | DIRECTION HORIZONTAL ; 56 | OFFSET 0.095 0.07 ; 57 | RESISTANCE RPERSQ 0.38 ; 58 | THICKNESS 0.13 ; 59 | HEIGHT 0.37 ; 60 | CAPACITANCE CPERSQDIST 7.7161e-05 ; 61 | EDGECAPACITANCE 2.7365e-05 ; 62 | END metal1 63 | 64 | LAYER via1 65 | TYPE CUT ; 66 | SPACING 0.08 ; 67 | WIDTH 0.07 ; 68 | RESISTANCE 5 ; 69 | END via1 70 | 71 | LAYER metal2 72 | TYPE ROUTING ; 73 | SPACINGTABLE 74 | PARALLELRUNLENGTH 0.0000 0.3000 0.9000 1.8000 2.7000 4.0000 75 | WIDTH 0.0000 0.0700 0.0700 0.0700 0.0700 0.0700 0.0700 76 | WIDTH 0.0900 0.0700 0.0900 0.0900 0.0900 0.0900 0.0900 77 | WIDTH 0.2700 0.0700 0.0900 0.2700 0.2700 0.2700 0.2700 78 | WIDTH 0.5000 0.0700 0.0900 0.2700 0.5000 0.5000 0.5000 79 | WIDTH 0.9000 0.0700 0.0900 0.2700 0.5000 0.9000 0.9000 80 | WIDTH 1.5000 0.0700 0.0900 0.2700 0.5000 0.9000 1.5000 ; 81 | WIDTH 0.07 ; 82 | PITCH 0.19 ; 83 | DIRECTION VERTICAL ; 84 | OFFSET 0.095 0.07 ; 85 | RESISTANCE RPERSQ 0.25 ; 86 | THICKNESS 0.14 ; 87 | HEIGHT 0.62 ; 88 | CAPACITANCE CPERSQDIST 4.0896e-05 ; 89 | EDGECAPACITANCE 2.5157e-05 ; 90 | END metal2 91 | 92 | LAYER via2 93 | TYPE CUT ; 94 | 
SPACING 0.09 ; 95 | WIDTH 0.07 ; 96 | RESISTANCE 5 ; 97 | END via2 98 | 99 | LAYER metal3 100 | TYPE ROUTING ; 101 | SPACINGTABLE 102 | PARALLELRUNLENGTH 0.0000 0.3000 0.9000 1.8000 2.7000 4.0000 103 | WIDTH 0.0000 0.0700 0.0700 0.0700 0.0700 0.0700 0.0700 104 | WIDTH 0.0900 0.0700 0.0900 0.0900 0.0900 0.0900 0.0900 105 | WIDTH 0.2700 0.0700 0.0900 0.2700 0.2700 0.2700 0.2700 106 | WIDTH 0.5000 0.0700 0.0900 0.2700 0.5000 0.5000 0.5000 107 | WIDTH 0.9000 0.0700 0.0900 0.2700 0.5000 0.9000 0.9000 108 | WIDTH 1.5000 0.0700 0.0900 0.2700 0.5000 0.9000 1.5000 ; 109 | WIDTH 0.07 ; 110 | PITCH 0.14 ; 111 | DIRECTION HORIZONTAL ; 112 | OFFSET 0.095 0.07 ; 113 | RESISTANCE RPERSQ 0.25 ; 114 | THICKNESS 0.14 ; 115 | HEIGHT 0.88 ; 116 | CAPACITANCE CPERSQDIST 2.7745e-05 ; 117 | EDGECAPACITANCE 2.5157e-05 ; 118 | END metal3 119 | 120 | LAYER via3 121 | TYPE CUT ; 122 | SPACING 0.09 ; 123 | WIDTH 0.07 ; 124 | RESISTANCE 5 ; 125 | END via3 126 | 127 | LAYER metal4 128 | TYPE ROUTING ; 129 | SPACINGTABLE 130 | PARALLELRUNLENGTH 0.0000 0.9000 1.8000 2.7000 4.0000 131 | WIDTH 0.0000 0.1400 0.1400 0.1400 0.1400 0.1400 132 | WIDTH 0.2700 0.1400 0.2700 0.2700 0.2700 0.2700 133 | WIDTH 0.5000 0.1400 0.2700 0.5000 0.5000 0.5000 134 | WIDTH 0.9000 0.1400 0.2700 0.5000 0.9000 0.9000 135 | WIDTH 1.5000 0.1400 0.2700 0.5000 0.9000 1.5000 ; 136 | WIDTH 0.14 ; 137 | PITCH 0.28 ; 138 | DIRECTION VERTICAL ; 139 | OFFSET 0.095 0.07 ; 140 | RESISTANCE RPERSQ 0.21 ; 141 | THICKNESS 0.28 ; 142 | HEIGHT 1.14 ; 143 | CAPACITANCE CPERSQDIST 2.0743e-05 ; 144 | EDGECAPACITANCE 3.0908e-05 ; 145 | END metal4 146 | 147 | LAYER via4 148 | TYPE CUT ; 149 | SPACING 0.16 ; 150 | WIDTH 0.14 ; 151 | RESISTANCE 3 ; 152 | END via4 153 | 154 | LAYER metal5 155 | TYPE ROUTING ; 156 | SPACINGTABLE 157 | PARALLELRUNLENGTH 0.0000 0.9000 1.8000 2.7000 4.0000 158 | WIDTH 0.0000 0.1400 0.1400 0.1400 0.1400 0.1400 159 | WIDTH 0.2700 0.1400 0.2700 0.2700 0.2700 0.2700 160 | WIDTH 0.5000 0.1400 0.2700 0.5000 0.5000 0.5000 
161 | WIDTH 0.9000 0.1400 0.2700 0.5000 0.9000 0.9000 162 | WIDTH 1.5000 0.1400 0.2700 0.5000 0.9000 1.5000 ; 163 | WIDTH 0.14 ; 164 | PITCH 0.28 ; 165 | DIRECTION HORIZONTAL ; 166 | OFFSET 0.095 0.07 ; 167 | RESISTANCE RPERSQ 0.21 ; 168 | THICKNESS 0.28 ; 169 | HEIGHT 1.71 ; 170 | CAPACITANCE CPERSQDIST 1.3527e-05 ; 171 | EDGECAPACITANCE 2.3863e-06 ; 172 | END metal5 173 | 174 | LAYER via5 175 | TYPE CUT ; 176 | SPACING 0.16 ; 177 | WIDTH 0.14 ; 178 | RESISTANCE 3 ; 179 | END via5 180 | 181 | LAYER metal6 182 | TYPE ROUTING ; 183 | SPACINGTABLE 184 | PARALLELRUNLENGTH 0.0000 0.9000 1.8000 2.7000 4.0000 185 | WIDTH 0.0000 0.1400 0.1400 0.1400 0.1400 0.1400 186 | WIDTH 0.2700 0.1400 0.2700 0.2700 0.2700 0.2700 187 | WIDTH 0.5000 0.1400 0.2700 0.5000 0.5000 0.5000 188 | WIDTH 0.9000 0.1400 0.2700 0.5000 0.9000 0.9000 189 | WIDTH 1.5000 0.1400 0.2700 0.5000 0.9000 1.5000 ; 190 | WIDTH 0.14 ; 191 | PITCH 0.28 ; 192 | DIRECTION VERTICAL ; 193 | OFFSET 0.095 0.07 ; 194 | RESISTANCE RPERSQ 0.21 ; 195 | THICKNESS 0.28 ; 196 | HEIGHT 2.28 ; 197 | CAPACITANCE CPERSQDIST 1.0036e-05 ; 198 | EDGECAPACITANCE 2.3863e-05 ; 199 | END metal6 200 | 201 | LAYER via6 202 | TYPE CUT ; 203 | SPACING 0.16 ; 204 | WIDTH 0.14 ; 205 | RESISTANCE 3 ; 206 | END via6 207 | 208 | LAYER metal7 209 | TYPE ROUTING ; 210 | SPACINGTABLE 211 | PARALLELRUNLENGTH 0.0000 1.8000 2.7000 4.0000 212 | WIDTH 0.0000 0.4000 0.4000 0.4000 0.4000 213 | WIDTH 0.5000 0.4000 0.5000 0.5000 0.5000 214 | WIDTH 0.9000 0.4000 0.5000 0.9000 0.9000 215 | WIDTH 1.5000 0.4000 0.5000 0.9000 1.5000 ; 216 | WIDTH 0.4 ; 217 | PITCH 0.8 ; 218 | DIRECTION HORIZONTAL ; 219 | OFFSET 0.095 0.07 ; 220 | RESISTANCE RPERSQ 0.075 ; 221 | THICKNESS 0.8 ; 222 | HEIGHT 2.85 ; 223 | CAPACITANCE CPERSQDIST 7.9771e-06 ; 224 | EDGECAPACITANCE 3.2577e-05 ; 225 | END metal7 226 | 227 | LAYER via7 228 | TYPE CUT ; 229 | SPACING 0.44 ; 230 | WIDTH 0.4 ; 231 | RESISTANCE 1 ; 232 | END via7 233 | 234 | LAYER metal8 235 | TYPE ROUTING ; 236 | 
SPACINGTABLE 237 | PARALLELRUNLENGTH 0.0000 1.8000 2.7000 4.0000 238 | WIDTH 0.0000 0.4000 0.4000 0.4000 0.4000 239 | WIDTH 0.5000 0.4000 0.5000 0.5000 0.5000 240 | WIDTH 0.9000 0.4000 0.5000 0.9000 0.9000 241 | WIDTH 1.5000 0.4000 0.5000 0.9000 1.5000 ; 242 | WIDTH 0.4 ; 243 | PITCH 0.8 ; 244 | DIRECTION VERTICAL ; 245 | OFFSET 0.095 0.07 ; 246 | RESISTANCE RPERSQ 0.075 ; 247 | THICKNESS 0.8 ; 248 | HEIGHT 4.47 ; 249 | CAPACITANCE CPERSQDIST 5.0391e-06 ; 250 | EDGECAPACITANCE 2.3932e-05 ; 251 | END metal8 252 | 253 | LAYER via8 254 | TYPE CUT ; 255 | SPACING 0.44 ; 256 | WIDTH 0.4 ; 257 | RESISTANCE 1 ; 258 | END via8 259 | 260 | LAYER metal9 261 | TYPE ROUTING ; 262 | SPACINGTABLE 263 | PARALLELRUNLENGTH 0.0000 2.7000 4.0000 264 | WIDTH 0.0000 0.8000 0.8000 0.8000 265 | WIDTH 0.9000 0.8000 0.9000 0.9000 266 | WIDTH 1.5000 0.8000 0.9000 1.5000 ; 267 | WIDTH 0.8 ; 268 | PITCH 1.6 ; 269 | DIRECTION HORIZONTAL ; 270 | OFFSET 0.095 0.07 ; 271 | RESISTANCE RPERSQ 0.03 ; 272 | THICKNESS 2 ; 273 | HEIGHT 6.09 ; 274 | CAPACITANCE CPERSQDIST 3.6827e-06 ; 275 | EDGECAPACITANCE 3.0803e-05 ; 276 | END metal9 277 | 278 | LAYER via9 279 | TYPE CUT ; 280 | SPACING 0.88 ; 281 | WIDTH 0.8 ; 282 | RESISTANCE 0.5 ; 283 | END via9 284 | 285 | LAYER metal10 286 | TYPE ROUTING ; 287 | SPACINGTABLE 288 | PARALLELRUNLENGTH 0.0000 2.7000 4.0000 289 | WIDTH 0.0000 0.8000 0.8000 0.8000 290 | WIDTH 0.9000 0.8000 0.9000 0.9000 291 | WIDTH 1.5000 0.8000 0.9000 1.5000 ; 292 | WIDTH 0.8 ; 293 | PITCH 1.6 ; 294 | DIRECTION VERTICAL ; 295 | OFFSET 0.095 0.07 ; 296 | RESISTANCE RPERSQ 0.03 ; 297 | THICKNESS 2 ; 298 | HEIGHT 10.09 ; 299 | CAPACITANCE CPERSQDIST 2.2124e-06 ; 300 | EDGECAPACITANCE 2.3667e-05 ; 301 | END metal10 302 | 303 | LAYER OVERLAP 304 | TYPE OVERLAP ; 305 | END OVERLAP 306 | 307 | VIA via1_4 DEFAULT 308 | LAYER via1 ; 309 | RECT -0.035 -0.035 0.035 0.035 ; 310 | LAYER metal1 ; 311 | RECT -0.035 -0.07 0.035 0.07 ; 312 | LAYER metal2 ; 313 | RECT -0.035 -0.07 0.035 0.07 ; 314 | 
END via1_4 315 | 316 | VIA via1_0 DEFAULT 317 | LAYER via1 ; 318 | RECT -0.035 -0.035 0.035 0.035 ; 319 | LAYER metal1 ; 320 | RECT -0.07 -0.07 0.07 0.07 ; 321 | LAYER metal2 ; 322 | RECT -0.07 -0.07 0.07 0.07 ; 323 | END via1_0 324 | 325 | VIA via1_1 DEFAULT 326 | LAYER via1 ; 327 | RECT -0.035 -0.035 0.035 0.035 ; 328 | LAYER metal1 ; 329 | RECT -0.07 -0.07 0.07 0.07 ; 330 | LAYER metal2 ; 331 | RECT -0.035 -0.07 0.035 0.07 ; 332 | END via1_1 333 | 334 | VIA via1_2 DEFAULT 335 | LAYER via1 ; 336 | RECT -0.035 -0.035 0.035 0.035 ; 337 | LAYER metal1 ; 338 | RECT -0.07 -0.07 0.07 0.07 ; 339 | LAYER metal2 ; 340 | RECT -0.07 -0.035 0.07 0.035 ; 341 | END via1_2 342 | 343 | VIA via1_3 DEFAULT 344 | LAYER via1 ; 345 | RECT -0.035 -0.035 0.035 0.035 ; 346 | LAYER metal1 ; 347 | RECT -0.035 -0.07 0.035 0.07 ; 348 | LAYER metal2 ; 349 | RECT -0.07 -0.07 0.07 0.07 ; 350 | END via1_3 351 | 352 | VIA via1_5 DEFAULT 353 | LAYER via1 ; 354 | RECT -0.035 -0.035 0.035 0.035 ; 355 | LAYER metal1 ; 356 | RECT -0.035 -0.07 0.035 0.07 ; 357 | LAYER metal2 ; 358 | RECT -0.07 -0.035 0.07 0.035 ; 359 | END via1_5 360 | 361 | VIA via1_6 DEFAULT 362 | LAYER via1 ; 363 | RECT -0.035 -0.035 0.035 0.035 ; 364 | LAYER metal1 ; 365 | RECT -0.07 -0.035 0.07 0.035 ; 366 | LAYER metal2 ; 367 | RECT -0.07 -0.07 0.07 0.07 ; 368 | END via1_6 369 | 370 | VIA via1_7 DEFAULT 371 | LAYER via1 ; 372 | RECT -0.035 -0.035 0.035 0.035 ; 373 | LAYER metal1 ; 374 | RECT -0.07 -0.035 0.07 0.035 ; 375 | LAYER metal2 ; 376 | RECT -0.035 -0.07 0.035 0.07 ; 377 | END via1_7 378 | 379 | VIA via1_8 DEFAULT 380 | LAYER via1 ; 381 | RECT -0.035 -0.035 0.035 0.035 ; 382 | LAYER metal1 ; 383 | RECT -0.07 -0.035 0.07 0.035 ; 384 | LAYER metal2 ; 385 | RECT -0.07 -0.035 0.07 0.035 ; 386 | END via1_8 387 | 388 | VIA via2_8 DEFAULT 389 | LAYER via2 ; 390 | RECT -0.035 -0.035 0.035 0.035 ; 391 | LAYER metal2 ; 392 | RECT -0.07 -0.035 0.07 0.035 ; 393 | LAYER metal3 ; 394 | RECT -0.07 -0.035 0.07 0.035 ; 395 | END via2_8 
396 | 397 | VIA via2_4 DEFAULT 398 | LAYER via2 ; 399 | RECT -0.035 -0.035 0.035 0.035 ; 400 | LAYER metal2 ; 401 | RECT -0.035 -0.07 0.035 0.07 ; 402 | LAYER metal3 ; 403 | RECT -0.035 -0.07 0.035 0.07 ; 404 | END via2_4 405 | 406 | VIA via2_5 DEFAULT 407 | LAYER via2 ; 408 | RECT -0.035 -0.035 0.035 0.035 ; 409 | LAYER metal2 ; 410 | RECT -0.035 -0.07 0.035 0.07 ; 411 | LAYER metal3 ; 412 | RECT -0.07 -0.035 0.07 0.035 ; 413 | END via2_5 414 | 415 | VIA via2_7 DEFAULT 416 | LAYER via2 ; 417 | RECT -0.035 -0.035 0.035 0.035 ; 418 | LAYER metal2 ; 419 | RECT -0.07 -0.035 0.07 0.035 ; 420 | LAYER metal3 ; 421 | RECT -0.035 -0.07 0.035 0.07 ; 422 | END via2_7 423 | 424 | VIA via2_6 DEFAULT 425 | LAYER via2 ; 426 | RECT -0.035 -0.035 0.035 0.035 ; 427 | LAYER metal2 ; 428 | RECT -0.07 -0.035 0.07 0.035 ; 429 | LAYER metal3 ; 430 | RECT -0.07 -0.07 0.07 0.07 ; 431 | END via2_6 432 | 433 | VIA via2_0 DEFAULT 434 | LAYER via2 ; 435 | RECT -0.035 -0.035 0.035 0.035 ; 436 | LAYER metal2 ; 437 | RECT -0.07 -0.07 0.07 0.07 ; 438 | LAYER metal3 ; 439 | RECT -0.07 -0.07 0.07 0.07 ; 440 | END via2_0 441 | 442 | VIA via2_1 DEFAULT 443 | LAYER via2 ; 444 | RECT -0.035 -0.035 0.035 0.035 ; 445 | LAYER metal2 ; 446 | RECT -0.07 -0.07 0.07 0.07 ; 447 | LAYER metal3 ; 448 | RECT -0.035 -0.07 0.035 0.07 ; 449 | END via2_1 450 | 451 | VIA via2_2 DEFAULT 452 | LAYER via2 ; 453 | RECT -0.035 -0.035 0.035 0.035 ; 454 | LAYER metal2 ; 455 | RECT -0.07 -0.07 0.07 0.07 ; 456 | LAYER metal3 ; 457 | RECT -0.07 -0.035 0.07 0.035 ; 458 | END via2_2 459 | 460 | VIA via2_3 DEFAULT 461 | LAYER via2 ; 462 | RECT -0.035 -0.035 0.035 0.035 ; 463 | LAYER metal2 ; 464 | RECT -0.035 -0.07 0.035 0.07 ; 465 | LAYER metal3 ; 466 | RECT -0.07 -0.07 0.07 0.07 ; 467 | END via2_3 468 | 469 | VIA via3_2 DEFAULT 470 | LAYER via3 ; 471 | RECT -0.035 -0.035 0.035 0.035 ; 472 | LAYER metal3 ; 473 | RECT -0.07 -0.035 0.07 0.035 ; 474 | LAYER metal4 ; 475 | RECT -0.07 -0.07 0.07 0.07 ; 476 | END via3_2 477 | 478 | VIA 
via3_0 DEFAULT 479 | LAYER via3 ; 480 | RECT -0.035 -0.035 0.035 0.035 ; 481 | LAYER metal3 ; 482 | RECT -0.07 -0.07 0.07 0.07 ; 483 | LAYER metal4 ; 484 | RECT -0.07 -0.07 0.07 0.07 ; 485 | END via3_0 486 | 487 | VIA via3_1 DEFAULT 488 | LAYER via3 ; 489 | RECT -0.035 -0.035 0.035 0.035 ; 490 | LAYER metal3 ; 491 | RECT -0.035 -0.07 0.035 0.07 ; 492 | LAYER metal4 ; 493 | RECT -0.07 -0.07 0.07 0.07 ; 494 | END via3_1 495 | 496 | VIA via4_0 DEFAULT 497 | LAYER via4 ; 498 | RECT -0.07 -0.07 0.07 0.07 ; 499 | LAYER metal4 ; 500 | RECT -0.07 -0.07 0.07 0.07 ; 501 | LAYER metal5 ; 502 | RECT -0.07 -0.07 0.07 0.07 ; 503 | END via4_0 504 | 505 | VIA via5_0 DEFAULT 506 | LAYER via5 ; 507 | RECT -0.07 -0.07 0.07 0.07 ; 508 | LAYER metal5 ; 509 | RECT -0.07 -0.07 0.07 0.07 ; 510 | LAYER metal6 ; 511 | RECT -0.07 -0.07 0.07 0.07 ; 512 | END via5_0 513 | 514 | VIA via6_0 DEFAULT 515 | LAYER via6 ; 516 | RECT -0.07 -0.07 0.07 0.07 ; 517 | LAYER metal6 ; 518 | RECT -0.07 -0.07 0.07 0.07 ; 519 | LAYER metal7 ; 520 | RECT -0.2 -0.2 0.2 0.2 ; 521 | END via6_0 522 | 523 | VIA via7_0 DEFAULT 524 | LAYER via7 ; 525 | RECT -0.2 -0.2 0.2 0.2 ; 526 | LAYER metal7 ; 527 | RECT -0.2 -0.2 0.2 0.2 ; 528 | LAYER metal8 ; 529 | RECT -0.2 -0.2 0.2 0.2 ; 530 | END via7_0 531 | 532 | VIA via8_0 DEFAULT 533 | LAYER via8 ; 534 | RECT -0.2 -0.2 0.2 0.2 ; 535 | LAYER metal8 ; 536 | RECT -0.2 -0.2 0.2 0.2 ; 537 | LAYER metal9 ; 538 | RECT -0.4 -0.4 0.4 0.4 ; 539 | END via8_0 540 | 541 | VIA via9_0 DEFAULT 542 | LAYER via9 ; 543 | RECT -0.4 -0.4 0.4 0.4 ; 544 | LAYER metal9 ; 545 | RECT -0.4 -0.4 0.4 0.4 ; 546 | LAYER metal10 ; 547 | RECT -0.4 -0.4 0.4 0.4 ; 548 | END via9_0 549 | 550 | VIARULE Via1Array-0 GENERATE 551 | LAYER metal1 ; 552 | ENCLOSURE 0.035 0.035 ; 553 | LAYER metal2 ; 554 | ENCLOSURE 0.035 0.035 ; 555 | LAYER via1 ; 556 | RECT -0.035 -0.035 0.035 0.035 ; 557 | SPACING 0.15 BY 0.15 ; 558 | END Via1Array-0 559 | 560 | VIARULE Via1Array-1 GENERATE 561 | LAYER metal1 ; 562 | ENCLOSURE 0 
0.035 ; 563 | LAYER metal2 ; 564 | ENCLOSURE 0 0.035 ; 565 | LAYER via1 ; 566 | RECT -0.035 -0.035 0.035 0.035 ; 567 | SPACING 0.15 BY 0.15 ; 568 | END Via1Array-1 569 | 570 | VIARULE Via1Array-2 GENERATE 571 | LAYER metal1 ; 572 | ENCLOSURE 0.035 0 ; 573 | LAYER metal2 ; 574 | ENCLOSURE 0.035 0 ; 575 | LAYER via1 ; 576 | RECT -0.035 -0.035 0.035 0.035 ; 577 | SPACING 0.15 BY 0.15 ; 578 | END Via1Array-2 579 | 580 | VIARULE Via1Array-3 GENERATE 581 | LAYER metal1 ; 582 | ENCLOSURE 0 0.035 ; 583 | LAYER metal2 ; 584 | ENCLOSURE 0.035 0 ; 585 | LAYER via1 ; 586 | RECT -0.035 -0.035 0.035 0.035 ; 587 | SPACING 0.15 BY 0.15 ; 588 | END Via1Array-3 589 | 590 | VIARULE Via1Array-4 GENERATE 591 | LAYER metal1 ; 592 | ENCLOSURE 0.035 0 ; 593 | LAYER metal2 ; 594 | ENCLOSURE 0 0.035 ; 595 | LAYER via1 ; 596 | RECT -0.035 -0.035 0.035 0.035 ; 597 | SPACING 0.15 BY 0.15 ; 598 | END Via1Array-4 599 | 600 | VIARULE Via2Array-0 GENERATE 601 | LAYER metal2 ; 602 | ENCLOSURE 0.035 0.035 ; 603 | LAYER metal3 ; 604 | ENCLOSURE 0.035 0.035 ; 605 | LAYER via2 ; 606 | RECT -0.035 -0.035 0.035 0.035 ; 607 | SPACING 0.16 BY 0.16 ; 608 | END Via2Array-0 609 | 610 | VIARULE Via2Array-1 GENERATE 611 | LAYER metal2 ; 612 | ENCLOSURE 0 0.035 ; 613 | LAYER metal3 ; 614 | ENCLOSURE 0 0.035 ; 615 | LAYER via2 ; 616 | RECT -0.035 -0.035 0.035 0.035 ; 617 | SPACING 0.16 BY 0.16 ; 618 | END Via2Array-1 619 | 620 | VIARULE Via2Array-2 GENERATE 621 | LAYER metal2 ; 622 | ENCLOSURE 0.035 0 ; 623 | LAYER metal3 ; 624 | ENCLOSURE 0.035 0 ; 625 | LAYER via2 ; 626 | RECT -0.035 -0.035 0.035 0.035 ; 627 | SPACING 0.16 BY 0.16 ; 628 | END Via2Array-2 629 | 630 | VIARULE Via2Array-3 GENERATE 631 | LAYER metal2 ; 632 | ENCLOSURE 0 0.035 ; 633 | LAYER metal3 ; 634 | ENCLOSURE 0.035 0 ; 635 | LAYER via2 ; 636 | RECT -0.035 -0.035 0.035 0.035 ; 637 | SPACING 0.16 BY 0.16 ; 638 | END Via2Array-3 639 | 640 | VIARULE Via2Array-4 GENERATE 641 | LAYER metal2 ; 642 | ENCLOSURE 0.035 0 ; 643 | LAYER metal3 ; 644 | 
ENCLOSURE 0 0.035 ; 645 | LAYER via2 ; 646 | RECT -0.035 -0.035 0.035 0.035 ; 647 | SPACING 0.16 BY 0.16 ; 648 | END Via2Array-4 649 | 650 | VIARULE Via3Array-0 GENERATE 651 | LAYER metal3 ; 652 | ENCLOSURE 0.035 0.035 ; 653 | LAYER metal4 ; 654 | ENCLOSURE 0.035 0.035 ; 655 | LAYER via3 ; 656 | RECT -0.035 -0.035 0.035 0.035 ; 657 | SPACING 0.16 BY 0.16 ; 658 | END Via3Array-0 659 | 660 | VIARULE Via3Array-1 GENERATE 661 | LAYER metal3 ; 662 | ENCLOSURE 0 0.035 ; 663 | LAYER metal4 ; 664 | ENCLOSURE 0.035 0.035 ; 665 | LAYER via3 ; 666 | RECT -0.035 -0.035 0.035 0.035 ; 667 | SPACING 0.16 BY 0.16 ; 668 | END Via3Array-1 669 | 670 | VIARULE Via3Array-2 GENERATE 671 | LAYER metal3 ; 672 | ENCLOSURE 0.035 0 ; 673 | LAYER metal4 ; 674 | ENCLOSURE 0.035 0.035 ; 675 | LAYER via3 ; 676 | RECT -0.035 -0.035 0.035 0.035 ; 677 | SPACING 0.16 BY 0.16 ; 678 | END Via3Array-2 679 | 680 | VIARULE Via4Array-0 GENERATE 681 | LAYER metal4 ; 682 | ENCLOSURE 0 0 ; 683 | LAYER metal5 ; 684 | ENCLOSURE 0 0 ; 685 | LAYER via4 ; 686 | RECT -0.07 -0.07 0.07 0.07 ; 687 | SPACING 0.3 BY 0.3 ; 688 | END Via4Array-0 689 | 690 | VIARULE Via5Array-0 GENERATE 691 | LAYER metal5 ; 692 | ENCLOSURE 0 0 ; 693 | LAYER metal6 ; 694 | ENCLOSURE 0 0 ; 695 | LAYER via5 ; 696 | RECT -0.07 -0.07 0.07 0.07 ; 697 | SPACING 0.3 BY 0.3 ; 698 | END Via5Array-0 699 | 700 | VIARULE Via6Array-0 GENERATE 701 | LAYER metal6 ; 702 | ENCLOSURE 0 0 ; 703 | LAYER metal7 ; 704 | ENCLOSURE 0.13 0.13 ; 705 | LAYER via6 ; 706 | RECT -0.07 -0.07 0.07 0.07 ; 707 | SPACING 0.3 BY 0.3 ; 708 | END Via6Array-0 709 | 710 | VIARULE Via7Array-0 GENERATE 711 | LAYER metal7 ; 712 | ENCLOSURE 0 0 ; 713 | LAYER metal8 ; 714 | ENCLOSURE 0 0 ; 715 | LAYER via7 ; 716 | RECT -0.2 -0.2 0.2 0.2 ; 717 | SPACING 0.84 BY 0.84 ; 718 | END Via7Array-0 719 | 720 | VIARULE Via8Array-0 GENERATE 721 | LAYER metal8 ; 722 | ENCLOSURE 0 0 ; 723 | LAYER metal9 ; 724 | ENCLOSURE 0.2 0.2 ; 725 | LAYER via8 ; 726 | RECT -0.2 -0.2 0.2 0.2 ; 727 | SPACING 
0.84 BY 0.84 ; 728 | END Via8Array-0 729 | 730 | VIARULE Via9Array-0 GENERATE 731 | LAYER metal10 ; 732 | ENCLOSURE 0 0 ; 733 | LAYER metal9 ; 734 | ENCLOSURE 0 0 ; 735 | LAYER via9 ; 736 | RECT -0.4 -0.4 0.4 0.4 ; 737 | SPACING 1.68 BY 1.68 ; 738 | END Via9Array-0 739 | 740 | SPACING 741 | SAMENET metal1 metal1 0.065 ; 742 | SAMENET metal2 metal2 0.07 ; 743 | SAMENET metal3 metal3 0.07 ; 744 | SAMENET metal4 metal4 0.14 ; 745 | SAMENET metal5 metal5 0.14 ; 746 | SAMENET metal6 metal6 0.14 ; 747 | SAMENET metal7 metal7 0.4 ; 748 | SAMENET metal8 metal8 0.4 ; 749 | SAMENET metal9 metal9 0.8 ; 750 | SAMENET metal10 metal10 0.8 ; 751 | SAMENET via1 via1 0.08 ; 752 | SAMENET via2 via2 0.09 ; 753 | SAMENET via3 via3 0.09 ; 754 | SAMENET via4 via4 0.16 ; 755 | SAMENET via5 via5 0.16 ; 756 | SAMENET via6 via6 0.16 ; 757 | SAMENET via7 via7 0.44 ; 758 | SAMENET via8 via8 0.44 ; 759 | SAMENET via9 via9 0.88 ; 760 | SAMENET via1 via2 0.0 STACK ; 761 | SAMENET via2 via3 0.0 STACK ; 762 | SAMENET via3 via4 0.0 STACK ; 763 | SAMENET via4 via5 0.0 STACK ; 764 | SAMENET via5 via6 0.0 STACK ; 765 | SAMENET via6 via7 0.0 STACK ; 766 | SAMENET via7 via8 0.0 STACK ; 767 | SAMENET via8 via9 0.0 STACK ; 768 | END SPACING 769 | 770 | SITE FreePDK45_38x28_10R_NP_162NW_34O 771 | SYMMETRY y ; 772 | CLASS core ; 773 | SIZE 0.19 BY 1.4 ; 774 | END FreePDK45_38x28_10R_NP_162NW_34O 775 | 776 | END LIBRARY 777 | # 778 | # End of file 779 | # 780 | -------------------------------------------------------------------------------- /platforms/preprocessed_cell_dictionary.json: -------------------------------------------------------------------------------- 1 | { 2 | "0": { 3 | "c_in": [ 4 | 0.918145, 5 | 1.656515, 6 | 3.195354 7 | ], 8 | "n_sizes": 3, 9 | "name": "AND2", 10 | "out_pin": "/ZN", 11 | "sizes": [ 12 | "_X1", 13 | "_X2", 14 | "_X4" 15 | ], 16 | "sizesi": [ 17 | 1, 18 | 2, 19 | 4 20 | ] 21 | }, 22 | "1": { 23 | "c_in": [ 24 | 0.879747, 25 | 1.599415, 26 | 3.085124 27 | ], 28 | "n_sizes": 
3, 29 | "name": "AND3", 30 | "out_pin": "/ZN", 31 | "sizes": [ 32 | "_X1", 33 | "_X2", 34 | "_X4" 35 | ], 36 | "sizesi": [ 37 | 1, 38 | 2, 39 | 4 40 | ] 41 | }, 42 | "2": { 43 | "c_in": [ 44 | 0.856528, 45 | 1.550933, 46 | 3.014616 47 | ], 48 | "n_sizes": 3, 49 | "name": "AND4", 50 | "out_pin": "/ZN", 51 | "sizes": [ 52 | "_X1", 53 | "_X2", 54 | "_X4" 55 | ], 56 | "sizesi": [ 57 | 1, 58 | 2, 59 | 4 60 | ] 61 | }, 62 | "4": { 63 | "c_in": [ 64 | 1.626352, 65 | 3.129761, 66 | 6.139254 67 | ], 68 | "n_sizes": 3, 69 | "name": "AOI21", 70 | "out_pin": "/ZN", 71 | "sizes": [ 72 | "_X1", 73 | "_X2", 74 | "_X4" 75 | ], 76 | "sizesi": [ 77 | 1, 78 | 2, 79 | 4 80 | ] 81 | }, 82 | "5": { 83 | "c_in": [ 84 | 1.58401, 85 | 2.987578, 86 | 6.090127 87 | ], 88 | "n_sizes": 3, 89 | "name": "AOI22", 90 | "out_pin": "/ZN", 91 | "sizes": [ 92 | "_X1", 93 | "_X2", 94 | "_X4" 95 | ], 96 | "sizesi": [ 97 | 1, 98 | 2, 99 | 4 100 | ] 101 | }, 102 | "6": { 103 | "c_in": [ 104 | 1.620338, 105 | 3.11422, 106 | 1.632665 107 | ], 108 | "n_sizes": 2, 109 | "name": "AOI211", 110 | "out_pin": "/ZN", 111 | "sizes": [ 112 | "_X1", 113 | "_X2", 114 | "_X4" 115 | ], 116 | "sizesi": [ 117 | 1, 118 | 2, 119 | 4 120 | ] 121 | }, 122 | "7": { 123 | "c_in": [ 124 | 1.58162, 125 | 3.135271, 126 | 1.557194 127 | ], 128 | "n_sizes": 2, 129 | "name": "AOI221", 130 | "out_pin": "/ZN", 131 | "sizes": [ 132 | "_X1", 133 | "_X2", 134 | "_X4" 135 | ], 136 | "sizesi": [ 137 | 1, 138 | 2, 139 | 4 140 | ] 141 | }, 142 | "8": { 143 | "c_in": [ 144 | 1.547208 145 | ], 146 | "n_sizes": 1, 147 | "name": "AOI222", 148 | "out_pin": "/ZN", 149 | "sizes": [ 150 | "_X1" 151 | ], 152 | "sizesi": [ 153 | 1 154 | ] 155 | }, 156 | "9": { 157 | "c_in": [ 158 | 0.974659, 159 | 1.779209, 160 | 3.401892, 161 | 6.585178, 162 | 12.410827, 163 | 26.703923 164 | ], 165 | "n_sizes": 6, 166 | "name": "BUF", 167 | "out_pin": "/Z", 168 | "sizes": [ 169 | "_X1", 170 | "_X2", 171 | "_X4", 172 | "_X8", 173 | "_X16", 174 | "_X32" 175 | ], 176 | 
"sizesi": [ 177 | 1, 178 | 2, 179 | 4, 180 | 8, 181 | 16, 182 | 32 183 | ] 184 | }, 185 | "10": { 186 | "c_in": [ 187 | 0.779830, 188 | 1.405914, 189 | 1.421162 190 | ], 191 | "n_sizes": 3, 192 | "name": "CLKBUF", 193 | "out_pin": "/Z", 194 | "sizes": [ 195 | "_X1", 196 | "_X2", 197 | "_X3" 198 | ], 199 | "sizesi": [ 200 | 1, 201 | 2, 202 | 3 203 | ] 204 | }, 205 | "14": { 206 | "c_in": [ 207 | 1.128277, 208 | 1.128370 209 | ], 210 | "n_sizes": 2, 211 | "name": "DFFR", 212 | "out_pin": "/Q", 213 | "sizes": [ 214 | "_X1", 215 | "_X2" 216 | ], 217 | "sizesi": [ 218 | 1, 219 | 2 220 | ] 221 | }, 222 | "15": { 223 | "c_in": [ 224 | 1.163714, 225 | 1.163041 226 | ], 227 | "n_sizes": 2, 228 | "name": "DFFS", 229 | "out_pin": "/Q", 230 | "sizes": [ 231 | "_X1", 232 | "_X2" 233 | ], 234 | "sizesi": [ 235 | 1, 236 | 2 237 | ] 238 | }, 239 | 240 | "16": { 241 | "c_in": [ 242 | 1.14029, 243 | 1.1276 244 | ], 245 | "n_sizes": 2, 246 | "name": "DFF", 247 | "out_pin": "/Q", 248 | "sizes": [ 249 | "_X1", 250 | "_X2" 251 | ], 252 | "sizesi": [ 253 | 1, 254 | 2 255 | ] 256 | }, 257 | 258 | "18": { 259 | "c_in": [ 260 | 0.8830, 261 | 1.1305 262 | ], 263 | "n_sizes": 2, 264 | "name": "DLL", 265 | "out_pin": "/Q", 266 | "sizes": [ 267 | "_X1", 268 | "_X2" 269 | ], 270 | "sizesi": [ 271 | 1, 272 | 2 273 | ] 274 | }, 275 | 276 | "19": { 277 | "c_in": [ 278 | 3.7457 279 | ], 280 | "n_sizes": 1, 281 | "name": "FA", 282 | "out_pin": "/CO", 283 | "sizes": [ 284 | "_X1" 285 | ], 286 | "sizesi": [ 287 | 1 288 | ] 289 | }, 290 | "21": { 291 | "c_in": [ 292 | 3.1858 293 | ], 294 | "n_sizes": 1, 295 | "name": "HA", 296 | "out_pin": "/CO", 297 | "sizes": [ 298 | "_X1" 299 | ], 300 | "sizesi": [ 301 | 1 302 | ] 303 | }, 304 | 305 | "22": { 306 | "c_in": [ 307 | 1.70023, 308 | 3.250891, 309 | 6.258425, 310 | 11.810652, 311 | 25.228138, 312 | 49.191468 313 | ], 314 | "n_sizes": 6, 315 | "name": "INV", 316 | "out_pin": "/ZN", 317 | "sizes": [ 318 | "_X1", 319 | "_X2", 320 | "_X4", 321 | "_X8", 322 | 
"_X16", 323 | "_X32" 324 | ], 325 | "sizesi": [ 326 | 1, 327 | 2, 328 | 4, 329 | 8, 330 | 16, 331 | 32 332 | ] 333 | }, 334 | "23": { 335 | "c_in": [ 336 | 0 337 | ], 338 | "n_sizes": 1, 339 | "name": "LOGIC0", 340 | "out_pin": "/Z", 341 | "sizes": [ 342 | "_X1" 343 | ], 344 | "sizesi": [ 345 | 1 346 | ] 347 | }, 348 | "24": { 349 | "c_in": [ 350 | 0 351 | ], 352 | "n_sizes": 1, 353 | "name": "LOGIC1", 354 | "out_pin": "/Z", 355 | "sizes": [ 356 | "_X1" 357 | ], 358 | "sizesi": [ 359 | 1 360 | ] 361 | }, 362 | 363 | "25": { 364 | "c_in": [ 365 | 0.944775, 366 | 1.592289 367 | ], 368 | "n_sizes": 2, 369 | "name": "MUX2", 370 | "out_pin": "/Z", 371 | "sizes": [ 372 | "_X1", 373 | "_X2" 374 | ], 375 | "sizesi": [ 376 | 1, 377 | 2 378 | ] 379 | }, 380 | "26": { 381 | "c_in": [ 382 | 1.599032, 383 | 3.053103, 384 | 5.954965 385 | ], 386 | "n_sizes": 3, 387 | "name": "NAND2", 388 | "out_pin": "/ZN", 389 | "sizes": [ 390 | "_X1", 391 | "_X2", 392 | "_X4" 393 | ], 394 | "sizesi": [ 395 | 1, 396 | 2, 397 | 4 398 | ] 399 | }, 400 | "27": { 401 | "c_in": [ 402 | 1.590286, 403 | 2.977805, 404 | 6.251026 405 | ], 406 | "n_sizes": 3, 407 | "name": "NAND3", 408 | "out_pin": "/ZN", 409 | "sizes": [ 410 | "_X1", 411 | "_X2", 412 | "_X4" 413 | ], 414 | "sizesi": [ 415 | 1, 416 | 2, 417 | 4 418 | ] 419 | }, 420 | "28": { 421 | "c_in": [ 422 | 1.522092, 423 | 2.922235, 424 | 5.652047 425 | ], 426 | "n_sizes": 3, 427 | "name": "NAND4", 428 | "out_pin": "/ZN", 429 | "sizes": [ 430 | "_X1", 431 | "_X2", 432 | "_X4" 433 | ], 434 | "sizesi": [ 435 | 1, 436 | 2, 437 | 4 438 | ] 439 | }, 440 | "29": { 441 | "c_in": [ 442 | 1.651345, 443 | 3.293307, 444 | 6.683366 445 | ], 446 | "n_sizes": 3, 447 | "name": "NOR2", 448 | "out_pin": "/ZN", 449 | "sizes": [ 450 | "_X1", 451 | "_X2", 452 | "_X4" 453 | ], 454 | "sizesi": [ 455 | 1, 456 | 2, 457 | 4 458 | ] 459 | }, 460 | "30": { 461 | "c_in": [ 462 | 1.616298, 463 | 3.365192, 464 | 6.105358 465 | ], 466 | "n_sizes": 3, 467 | "name": "NOR3", 468 | 
"out_pin": "/ZN", 469 | "sizes": [ 470 | "_X1", 471 | "_X2", 472 | "_X4" 473 | ], 474 | "sizesi": [ 475 | 1, 476 | 2, 477 | 4 478 | ] 479 | }, 480 | "31": { 481 | "c_in": [ 482 | 1.60595, 483 | 3.390672, 484 | 6.026564 485 | ], 486 | "n_sizes": 3, 487 | "name": "NOR4", 488 | "out_pin": "/ZN", 489 | "sizes": [ 490 | "_X1", 491 | "_X2", 492 | "_X4" 493 | ], 494 | "sizesi": [ 495 | 1, 496 | 2, 497 | 4 498 | ] 499 | }, 500 | "32": { 501 | "c_in": [ 502 | 1.571893, 503 | 3.100793, 504 | 6.194658 505 | ], 506 | "n_sizes": 3, 507 | "name": "OAI21", 508 | "out_pin": "/ZN", 509 | "sizes": [ 510 | "_X1", 511 | "_X2", 512 | "_X4" 513 | ], 514 | "sizesi": [ 515 | 1, 516 | 2, 517 | 4 518 | ] 519 | }, 520 | "33": { 521 | "c_in": [ 522 | 1.555656, 523 | 3.132973, 524 | 6.448126 525 | ], 526 | "n_sizes": 3, 527 | "name": "OAI22", 528 | "out_pin": "/ZN", 529 | "sizes": [ 530 | "_X1", 531 | "_X2", 532 | "_X4" 533 | ], 534 | "sizesi": [ 535 | 1, 536 | 2, 537 | 4 538 | ] 539 | }, 540 | "34": { 541 | "c_in": [ 542 | 1.679872 543 | ], 544 | "n_sizes": 1, 545 | "name": "OAI33", 546 | "out_pin": "/ZN", 547 | "sizes": [ 548 | "_X1" 549 | ], 550 | "sizesi": [ 551 | 1 552 | ] 553 | }, 554 | "35": { 555 | "c_in": [ 556 | 1.657276, 557 | 2.99551, 558 | 6.206207 559 | ], 560 | "n_sizes": 3, 561 | "name": "OAI211", 562 | "out_pin": "/ZN", 563 | "sizes": [ 564 | "_X1", 565 | "_X2", 566 | "_X4" 567 | ], 568 | "sizesi": [ 569 | 1, 570 | 2, 571 | 4 572 | ] 573 | }, 574 | "36": { 575 | "c_in": [ 576 | 1.569676, 577 | 2.996374 578 | ], 579 | "n_sizes": 2, 580 | "name": "OAI221", 581 | "out_pin": "/ZN", 582 | "sizes": [ 583 | "_X1", 584 | "_X2" 585 | ], 586 | "sizesi": [ 587 | 1, 588 | 2 589 | ] 590 | }, 591 | "37": { 592 | "c_in": [ 593 | 1.620241 594 | ], 595 | "n_sizes": 1, 596 | "name": "OAI222", 597 | "out_pin": "/ZN", 598 | "sizes": [ 599 | "_X1" 600 | ], 601 | "sizesi": [ 602 | 1 603 | ] 604 | }, 605 | "38": { 606 | "c_in": [ 607 | 0.941939, 608 | 1.694286, 609 | 3.384966 610 | ], 611 | 
"n_sizes": 3, 612 | "name": "OR2", 613 | "out_pin": "/ZN", 614 | "sizes": [ 615 | "_X1", 616 | "_X2", 617 | "_X4" 618 | ], 619 | "sizesi": [ 620 | 1, 621 | 2, 622 | 4 623 | ] 624 | }, 625 | "39": { 626 | "c_in": [ 627 | 0.921561, 628 | 1.672973, 629 | 3.374939 630 | ], 631 | "n_sizes": 3, 632 | "name": "OR3", 633 | "out_pin": "/ZN", 634 | "sizes": [ 635 | "_X1", 636 | "_X2", 637 | "_X4" 638 | ], 639 | "sizesi": [ 640 | 1, 641 | 2, 642 | 4 643 | ] 644 | }, 645 | "40": { 646 | "c_in": [ 647 | 0.914189, 648 | 1.636906, 649 | 3.348292 650 | ], 651 | "n_sizes": 3, 652 | "name": "OR4", 653 | "out_pin": "/ZN", 654 | "sizes": [ 655 | "_X1", 656 | "_X2", 657 | "_X4" 658 | ], 659 | "sizesi": [ 660 | 1, 661 | 2, 662 | 4 663 | ] 664 | }, 665 | "48": { 666 | "c_in": [ 667 | 2.232754, 668 | 4.003783 669 | ], 670 | "n_sizes": 2, 671 | "name": "XNOR2", 672 | "out_pin": "/ZN", 673 | "sizes": [ 674 | "_X1", 675 | "_X2" 676 | ], 677 | "sizesi": [ 678 | 1, 679 | 2 680 | ] 681 | }, 682 | "49": { 683 | "c_in": [ 684 | 2.232144, 685 | 4.330455 686 | ], 687 | "n_sizes": 2, 688 | "name": "XOR2", 689 | "out_pin": "/Z", 690 | "sizes": [ 691 | "_X1", 692 | "_X2" 693 | ], 694 | "sizesi": [ 695 | 1, 696 | 2 697 | ] 698 | } 699 | } 700 | -------------------------------------------------------------------------------- /session1/README.md: -------------------------------------------------------------------------------- 1 | 2 | # OpenROAD Python API Tutorial 3 | This page shows example scripting of OpenROAD Python APIs. 4 | # Building OpenROAD Binary 5 | Clone OpenROAD 6 | ``` 7 | git clone --recursive https://github.com/The-OpenROAD-Project/OpenROAD.git 8 | ``` 9 | Build OpenROAD Binary 10 | ``` 11 | cd ./OpenROAD/ 12 | mkdir build 13 | cd build 14 | cmake .. 
15 | make -j 16 | ``` 17 | 18 | # Running OpenROAD in Python Environment 19 | ## Execute a Simple Script 20 | ``` 21 | ./<openroad_binary> -python <script_name>.py 22 | ``` 23 | ## Execute a Script with arguments 24 | ``` 25 | ./<openroad_binary> -python <script_name>.py <arg1> <arg2> ... 26 | 27 | ``` 28 | ## Using OpenROAD in a Python Shell 29 | ### Import OpenROAD into Python env 30 | ``` 31 | import openroad 32 | ``` 33 | ### Read Cell Library Files using OpenROAD Python API 34 | ``` 35 | from openroad import Tech, Design, Timing 36 | 37 | # Must declare a Tech Object 38 | tech = Tech() 39 | 40 | tech.readLiberty("libFile") 41 | tech.readLef("lefFile") 42 | 43 | # Must follow the hierarchy 44 | design = Design(tech) 45 | timing = Timing(design) 46 | ``` 47 | ### Read Verilog File using OpenROAD Python API 48 | ``` 49 | from openroad import Tech, Design, Timing 50 | 51 | tech = Tech() 52 | design = Design(tech) 53 | design.readVerilog("verilogFile.v") 54 | # Link the Top Module 55 | design.link("topModuleName") 56 | ``` 57 | ### Read DEF File using OpenROAD Python API 58 | ``` 59 | from openroad import Tech, Design, Timing 60 | 61 | tech = Tech() 62 | design = Design(tech) 63 | design.readDef("defFile.def") 64 | ``` 65 | ### Read SDC File using OpenROAD Python API 66 | ``` 67 | from openroad import Tech, Design, Timing 68 | 69 | tech = Tech() 70 | design = Design(tech) 71 | sdcFile = "sdcFile.sdc" 72 | design.evalTclString("read_sdc %s"%sdcFile) 73 | ``` 74 | ### Write files 75 | ``` 76 | # Write DEF 77 | design.writeDef("final.def") 78 | 79 | # Write Gate-level Verilog file 80 | design.evalTclString("write_verilog %s.v"%"designName") 81 | 82 | # Write OpenDB file 83 | design.evalTclString("write_db .%s.odb"%"designName") 84 | ``` 85 | ## Query information from OpenDB 86 | ### Query library cell information 87 | ``` 88 | import openroad as ord 89 | 90 | # Get OpenDB 91 | db = ord.get_db() 92 | 93 | # Get all cell libraries from different files (if multiple .lib files are read) 94 | libs = db.getLibs() 95 | 96 | for lib in libs: 97 | # Get 
library name 98 | lib_name = lib.getName() 99 | 100 | # Get all library cells in that library 101 | lib_masters = lib.getMasters() 102 | for master in lib_masters: 103 | # Get the name of the library cell 104 | libcell_name = master.getName() 105 | 106 | # Get the area of it by getting the product of its height and width 107 | libcell_area = master.getHeight() * master.getWidth() 108 | ``` 109 | ### Query cell information 110 | Get all cells in the design 111 | ``` 112 | from openroad import Tech, Design 113 | 114 | tech = Tech() 115 | # Assume all files are read 116 | design = Design(tech) 117 | # Assume all files are read 118 | 119 | # Get the design 120 | block = design.getBlock() 121 | 122 | # Get a list of cells in the design 123 | insts = block.getInsts() 124 | inst = insts[0] 125 | ``` 126 | dBInst.getName() 127 | ``` 128 | # Get cell name 129 | cell_name = inst.getName() 130 | ``` 131 | Instance location 132 | ``` 133 | BBox = inst.getBBox() 134 | x0 = BBox.xMin() 135 | y0 = BBox.yMin() 136 | x1 = BBox.xMax() 137 | y1 = BBox.yMax() 138 | ``` 139 | dBInst.getMaster() 140 | ``` 141 | # Get the master cell of the instance (library cell) 142 | masterCell = inst.getMaster() 143 | ``` 144 | Design.isSequential() 145 | ``` 146 | # Return True if it's a flipflop 147 | isSeq = design.isSequential(masterCell) 148 | ``` 149 | dBMaster.isBlock() 150 | ``` 151 | # Return True if it's a macro 152 | isMacro = masterCell.isBlock() 153 | ``` 154 | dBMaster.isFiller() 155 | ``` 156 | # Return True if it's a filler cell 157 | isFiller = masterCell.isFiller() 158 | ``` 159 | Design.isBuffer() 160 | ``` 161 | # Return True if it's a buffer 162 | isBuffer = design.isBuffer(masterCell) 163 | ``` 164 | Design.isInverter() 165 | ``` 166 | # Return True if it's an inverter 167 | isInv = design.isInverter(masterCell) 168 | ``` 169 | Design.isInClock() 170 | ``` 171 | # Return True if it's in a clock net 172 | isInClk = design.isInClock(inst) 173 | ``` 174 | Timing.staticPower() 175 | 
``` 176 | # Get the available design corner (change the index to use different corners) 177 | corner = timing.getCorners()[0] 178 | # Get the static power of a cell 179 | cellStaticPower = timing.staticPower(inst, corner) 180 | ``` 181 | Timing.dynamicPower() 182 | ``` 183 | # Get the available design corner (change the index to use different corners) 184 | corner = timing.getCorners()[0] 185 | # Get the dynamic power of a cell 186 | cellDynamicPower = timing.dynamicPower(inst, corner) 187 | ``` 188 | dBInst.getITerms() 189 | ``` 190 | # Get all pins of the cell 191 | ITerms = inst.getITerms() 192 | ``` 193 | ### Query net information 194 | Get all nets in the design in a list 195 | ``` 196 | # Get the design block 197 | block = design.getBlock() 198 | # Get all nets in a list in the design 199 | nets = block.getNets() 200 | net = nets[0] 201 | ``` 202 | dBNet.getSigType() 203 | ``` 204 | # Return "POWER" if the net is a power (VDD) net. Return "GROUND" if the net is a ground (VSS) net. 
205 | sigType = net.getSigType() 206 | ``` 207 | dBNet.getName() 208 | ``` 209 | # Get the name of the net 210 | net_name = net.getName() 211 | ``` 212 | dBNet.getITerms() 213 | ``` 214 | # Get all the pins connected to this net 215 | net_ITerms = net.getITerms() 216 | ``` 217 | dBNet.getTotalCapacitance() 218 | ``` 219 | # Get the total wire capacitance of the net 220 | net_cap = net.getTotalCapacitance() 221 | ``` 222 | dBNet.getTotalResistance() 223 | ``` 224 | # Get the total wire resistance of the net 225 | net_res = net.getTotalResistance() 226 | ``` 227 | dBNet.getTotalCouplingCap() 228 | ``` 229 | # Get the total wire coupling capacitance of the net 230 | net_coupling = net.getTotalCouplingCap() 231 | ``` 232 | Timing.getNetCap() 233 | ``` 234 | # Get the available design corner (change the index to use different corners) 235 | corner = timing.getCorners()[0] 236 | # Get the pin capacitance + wire capacitance of the net 237 | total_cap = timing.getNetCap(net, corner, timing.Max) 238 | ``` 239 | dbITerm.isInputSignal() 240 | ``` 241 | # Get the number of fanout of the net 242 | outputPins = [] 243 | net_ITerms = net.getITerms() 244 | for ITerm in net_ITerms: 245 | if (ITerm.isInputSignal()): 246 | outputPins.append(ITerm) 247 | fanOut = len(outputPins) 248 | ``` 249 | Design.getNetRoutedLength() 250 | ``` 251 | # Get the length of the net 252 | netRouteLength = design.getNetRoutedLength(net) 253 | ``` 254 | ### Query pin information 255 | Get all pins in the design in a list 256 | ``` 257 | block = design.getBlock() 258 | # Get all pins in the design 259 | ITerms = block.getITerms() 260 | ITerm = ITerms[0] 261 | ``` 262 | Design.getITermName() 263 | ``` 264 | # Get the pin name 265 | pinName = design.getITermName(ITerm) 266 | ``` 267 | dBITerm.getNet() 268 | ``` 269 | # The net connects to this pin 270 | net = ITerm.getNet() 271 | ``` 272 | dBITerm.getInst() 273 | ``` 274 | # Get the cell that this pin belongs to 275 | cell = ITerm.getInst() 276 | ``` 277 | 
dBITerm.isOutputSignal() 278 | ``` 279 | # Return True if the pin is an output pin of the cell 280 | outputPin = ITerm.isOutputSignal() 281 | ``` 282 | dBITerm.isInputSignal() 283 | ``` 284 | # Return True if the pin is an input pin of the cell 285 | inputPin = ITerm.isInputSignal() 286 | ``` 287 | dBITerm.getAvgXY() 288 | ``` 289 | # Get the x and y location of the pin 290 | PinXY_list = ITerm.getAvgXY() 291 | if PinXY_list[0]: 292 | x = PinXY_list[1] 293 | y = PinXY_list[2] 294 | ``` 295 | Timing.isEndpoint() 296 | ``` 297 | # Return True if the pin is a sink pin of any timing path 298 | is_endpoint = timing.isEndpoint(ITerm) 299 | ``` 300 | Timing.getPinSlew() 301 | ``` 302 | # Get the slew of the pin 303 | pinSlew = timing.getPinSlew(ITerm) 304 | ``` 305 | Timing.getPinSlack() 306 | ``` 307 | # Get the falling slack of the pin 308 | pinFallSlack = timing.getPinSlack(ITerm, timing.Fall, timing.Max) 309 | # Get the rising slack of the pin 310 | pinRiseSlack = timing.getPinSlack(ITerm, timing.Rise, timing.Max) 311 | ``` 312 | Timing.getPinArrival() 313 | ``` 314 | # Get the rising arrival time of the pin 315 | pinRiseArr = timing.getPinArrival(ITerm, timing.Rise) 316 | # Get the falling arrival time of the pin 317 | pinFallArr = timing.getPinArrival(ITerm, timing.Fall) 318 | ``` 319 | Timing.getMaxCapLimit() 320 | ``` 321 | # Get the max permitted load capacitance limit of the pin 322 | maxCap = timing.getMaxCapLimit(library_cell_pin) 323 | ``` 324 | Timing.getMaxSlewLimit() 325 | ``` 326 | # Get the max permitted slew of the pin 327 | maxSlew = timing.getMaxSlewLimit(library_cell_pin) 328 | ``` 329 | dBITerm.isInputSignal() 330 | Timing.getPortCap() 331 | ``` 332 | # Get the input capacitance of a pin if it is an input pin of a cell 333 | if ITerm.isInputSignal(): 334 | inputPinCap = timing.getPortCap(ITerm, corner, timing.Max) 335 | ``` 336 | ## Using OpenROAD Python APIs to Perform Physical Design Steps 337 | Floorplan using utilization rate 338 | ``` 339 | 
import odb 340 | 341 | # Get OpenROAD's Floorplanner 342 | floorplan = design.getFloorplan() 343 | # Set the floorplan utilization to 45% 344 | floorplan_utilization = 45 345 | # Set the aspect ratio of the design (height/width) as 1.5 346 | floorplan_aspect_ratio = 1.5 347 | # Set the spacing between core and die as 14 um 348 | floorplan_core_spacing = [design.micronToDBU(14) for i in range(4)] 349 | # Find the site name in lef 350 | site = floorplan.findSite("siteName") 351 | floorplan.initFloorplan(floorplan_utilization, floorplan_aspect_ratio, 352 | floorplan_core_spacing[0], floorplan_core_spacing[1], 353 | floorplan_core_spacing[2], floorplan_core_spacing[3], site) 354 | # Create Tracks 355 | floorplan.makeTracks() 356 | ``` 357 | Floorplan using manually set area 358 | ``` 359 | import odb 360 | 361 | # Get OpenROAD's Floorplanner 362 | floorplan = design.getFloorplan() 363 | # Set the core and die area 364 | # The four args are bottom-left x, bottom-left y, top-right x and top-right y 365 | die_area = odb.Rect(design.micronToDBU(0), design.micronToDBU(0), design.micronToDBU(40), design.micronToDBU(60)) 366 | core_area = odb.Rect(design.micronToDBU(10), design.micronToDBU(10), design.micronToDBU(30), design.micronToDBU(50)) 367 | # Find the site in lef 368 | site = floorplan.findSite("site_name") 369 | floorplan.initFloorplan(die_area, core_area, site) 370 | # Create Tracks 371 | floorplan.makeTracks() 372 | ``` 373 | Place IO pins 374 | ``` 375 | params = design.getIOPlacer().getParameters() 376 | params.setRandSeed(42) 377 | params.setMinDistanceInTracks(False) 378 | params.setMinDistance(design.micronToDBU(0)) 379 | params.setCornerAvoidance(design.micronToDBU(0)) 380 | # Place the pins on M8 and M9 381 | design.getIOPlacer().addHorLayer(design.getTech().getDB().getTech().findLayer("M8")) 382 | design.getIOPlacer().addVerLayer(design.getTech().getDB().getTech().findLayer("M9")) 383 | IOPlacer_random_mode = True 384 | 
design.getIOPlacer().run(IOPlacer_random_mode) 385 | ``` 386 | Global Placement 387 | ``` 388 | gpl = design.getReplace() 389 | gpl.setTimingDrivenMode(False) 390 | gpl.setRoutabilityDrivenMode(True) 391 | gpl.setUniformTargetDensityMode(True) 392 | # Set the max iteration of global placement to 30 times 393 | gpl.setInitialPlaceMaxIter(30) 394 | gpl.setInitDensityPenalityFactor(0.05) 395 | gpl.doInitialPlace() 396 | gpl.doNesterovPlace() 397 | gpl.reset() 398 | ``` 399 | Macro Placement 400 | ``` 401 | macros = [inst for inst in ord.get_db_block().getInsts() if inst.getMaster().isBlock()] 402 | if len(macros) > 0: 403 | mpl = design.getMacroPlacer() 404 | # Set the halo around macros to 5 microns 405 | mpl_halo_x, mpl_halo_y = 5, 5 406 | mpl.setHalo(mpl_halo_x, mpl_halo_y) 407 | # Set the channel width between macros to 5 microns 408 | mpl_channel_x, mpl_channel_y = 5, 5 409 | mpl.setChannel(mpl_channel_x, mpl_channel_y) 410 | # Set the fence region as a user defined area in microns 411 | design.getMacroPlacer().setFenceRegion(32, 55, 32, 60) 412 | # Snap the macro to layer M4 (usually M4) 413 | layer = design.getTech().getDB().getTech().findLayer("M4") 414 | mpl.setSnapLayer(layer) 415 | mpl.placeMacrosCornerMaxWl() 416 | ``` 417 | Detailed Placement 418 | ``` 419 | site = design.getBlock().getRows()[0].getSite() 420 | max_disp_x = int(design.micronToDBU(0.5) / site.getWidth()) 421 | max_disp_y = int(design.micronToDBU(1) / site.getHeight()) 422 | design.getOpendp().detailedPlacement(max_disp_x, max_disp_y, "", False) 423 | ``` 424 | Clock Tree Synthesis 425 | ``` 426 | design.evalTclString("set_propagated_clock [core_clock]") 427 | design.evalTclString("set_wire_rc -clock -resistance 0.0435 -capacitance 0.0817") 428 | design.evalTclString("set_wire_rc -signal -resistance 0.0435 -capacitance 0.0817") 429 | 430 | cts = design.getTritonCts() 431 | parms = cts.getParms() 432 | parms.setWireSegmentUnit(20) 433 | # Can choose different buffer cells for cts 434 | 
cts.setBufferList("BUF_X3") 435 | cts.setRootBuffer("BUF_X3") 436 | cts.setSinkBuffer("BUF_X3") 437 | cts.runTritonCts() 438 | # Followed by detailed placement to legalize the clock buffers 439 | site = design.getBlock().getRows()[0].getSite() 440 | max_disp_x = int(design.micronToDBU(0.5) / site.getWidth()) 441 | max_disp_y = int(design.micronToDBU(1) / site.getHeight()) 442 | design.getOpendp().detailedPlacement(max_disp_x, max_disp_y, "", False) 443 | ``` 444 | Add Filler Cells 445 | ``` 446 | db = ord.get_db() 447 | filler_masters = list() 448 | # Filler cell prefix may be different when using different library 449 | filler_cells_prefix = "filler*" 450 | for lib in db.getLibs(): 451 | for master in lib.getMasters(): 452 | master_name = master.getConstName() 453 | if re.fullmatch(filler_cells_prefix, master_name) != None: 454 | filler_masters.append(master) 455 | if len(filler_masters) == 0: 456 | print("wrong filler cell prefix") 457 | else: 458 | design.getOpendp().fillerPlacement(filler_masters, filler_cells_prefix) 459 | ``` 460 | Power Planning 461 | ``` 462 | import pdn, odb 463 | 464 | # Global Connect 465 | for net in design.getBlock().getNets(): 466 | if net.getSigType() == "POWER" or net.getSigType() == "GROUND": 467 | net.setSpecial() 468 | VDD_net = design.getBlock().findNet("VDD") 469 | VSS_net = design.getBlock().findNet("VSS") 470 | switched_power = None 471 | secondary = list() 472 | if VDD_net == None: 473 | VDD_net = odb.dbNet_create(design.getBlock(), "VDD") 474 | VDD_net.setSpecial() 475 | VDD_net.setSigType("POWER") 476 | if VSS_net == None: 477 | VSS_net = odb.dbNet_create(design.getBlock(), "VSS") 478 | VSS_net.setSpecial() 479 | VSS_net.setSigType("GROUND") 480 | design.getBlock().addGlobalConnect(region = None, instPattern = ".*", 481 | pinPattern = "^VDD$", net = VDD_net, 482 | do_connect = True) 483 | design.getBlock().addGlobalConnect(region = None, instPattern = ".*", 484 | pinPattern = "^VDDPE$", net = VDD_net, 485 | do_connect = 
True) 486 | design.getBlock().addGlobalConnect(region = None, instPattern = ".*", 487 | pinPattern = "^VDDCE$", net = VDD_net, 488 | do_connect = True) 489 | design.getBlock().addGlobalConnect(region = None, instPattern = ".*", 490 | pinPattern = "^VSS$", net = VSS_net, 491 | do_connect = True) 492 | design.getBlock().addGlobalConnect(region = None, instPattern = ".*", 493 | pinPattern = "^VSSE$", net = VSS_net, 494 | do_connect = True) 495 | design.getBlock().globalConnect() 496 | # Voltage Domains 497 | pdngen = design.getPdnGen() 498 | pdngen.setCoreDomain(power = VDD_net, switched_power = switched_power, 499 | ground = VSS_net, secondary = secondary) 500 | # Set the width of the PDN ring and the spacing between VDD and VSS rings 501 | core_ring_width = [design.micronToDBU(5), design.micronToDBU(5)] 502 | core_ring_spacing = [design.micronToDBU(5), design.micronToDBU(5)] 503 | core_ring_core_offset = [design.micronToDBU(0) for i in range(4)] 504 | core_ring_pad_offset = [design.micronToDBU(0) for i in range(4)] 505 | # When the two layers are parallel, specify the distance between via cuts. 
506 | pdn_cut_pitch = [design.micronToDBU(2) for i in range(2)] 507 | 508 | ring_connect_to_pad_layers = list() 509 | for layer in design.getTech().getDB().getTech().getLayers(): 510 | if layer.getType() == "ROUTING": 511 | ring_connect_to_pad_layers.append(layer) 512 | 513 | # Define power grid for core 514 | domains = [pdngen.findDomain("Core")] 515 | halo = [design.micronToDBU(0) for i in range(4)] 516 | for domain in domains: 517 | pdngen.makeCoreGrid(domain = domain, name = "top_pdn", starts_with = pdn.GROUND, 518 | pin_layers = [], generate_obstructions = [], powercell = None, 519 | powercontrol = None, powercontrolnetwork = "STAR") 520 | m1 = design.getTech().getDB().getTech().findLayer("M1") 521 | m4 = design.getTech().getDB().getTech().findLayer("M4") 522 | m7 = design.getTech().getDB().getTech().findLayer("M7") 523 | m8 = design.getTech().getDB().getTech().findLayer("M8") 524 | grid = pdngen.findGrid("top_pdn") 525 | for g in grid: 526 | # Make Ring for the core 527 | pdngen.makeRing(grid = g, layer0 = m7, width0 = core_ring_width[0], spacing0 = core_ring_spacing[0], 528 | layer1 = m8, width1 = core_ring_width[0], spacing1 = core_ring_spacing[0], 529 | starts_with = pdn.GRID, offset = core_ring_core_offset, pad_offset = core_ring_pad_offset, extend = False, 530 | pad_pin_layers = ring_connect_to_pad_layers, nets = []) 531 | # Add power and ground grid on M1 and attach to cell's VDD/VSS pin 532 | pdngen.makeFollowpin(grid = g, layer = m1, 533 | width = design.micronToDBU(0.07), extend = pdn.CORE) 534 | # Create the rest of the power delivery network 535 | pdngen.makeStrap(grid = g, layer = m4, width = design.micronToDBU(1.2), 536 | spacing = design.micronToDBU(1.2), pitch = design.micronToDBU(6), offset = design.micronToDBU(0), 537 | number_of_straps = 0, snap = False, starts_with = pdn.GRID, extend = pdn.CORE, nets = []) 538 | pdngen.makeStrap(grid = g, layer = m7, width = design.micronToDBU(1.4), 539 | spacing = design.micronToDBU(1.4), pitch = 
design.micronToDBU(10.8), offset = design.micronToDBU(0), 540 | number_of_straps = 0, snap = False, starts_with = pdn.GRID, extend = pdn.RINGS, nets = []) 541 | pdngen.makeConnect(grid = g, layer0 = m1, layer1 = m4, 542 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 543 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 544 | pdngen.makeConnect(grid = g, layer0 = m4, layer1 = m7, 545 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 546 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 547 | pdngen.makeConnect(grid = g, layer0 = m7, layer1 = m8, 548 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 549 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 550 | # Create power delivery network for macros 551 | # Set the width of the PDN ring for macros and the spacing between VDD and VSS rings for macros 552 | macro_ring_width = [design.micronToDBU(2), design.micronToDBU(2)] 553 | macro_ring_spacing = [design.micronToDBU(2), design.micronToDBU(2)] 554 | macro_ring_core_offset = [design.micronToDBU(0) for i in range(4)] 555 | macro_ring_pad_offset = [design.micronToDBU(0) for i in range(4)] 556 | m5 = design.getTech().getDB().getTech().findLayer("M5") 557 | m6 = design.getTech().getDB().getTech().findLayer("M6") 558 | for i in range(len(macros)): 559 | for domain in domains: 560 | pdngen.makeInstanceGrid(domain = domain, name = "Macro_core_grid_" + str(i), 561 | starts_with = pdn.GROUND, inst = macros[i], halo = halo, 562 | pg_pins_to_boundary = True, default_grid = False, 563 | generate_obstructions = [], is_bump = False) 564 | grid = pdngen.findGrid("Macro_core_grid_" + str(i)) 565 | for g in grid: 566 | pdngen.makeRing(grid = g, layer0 = m5, width0 = macro_ring_width[0], spacing0 = macro_ring_spacing[0], 567 | layer1 = m6, width1 = 
macro_ring_width[0], spacing1 = macro_ring_spacing[0], 568 | starts_with = pdn.GRID, offset = macro_ring_core_offset, pad_offset = macro_ring_pad_offset, extend = False, 569 | pad_pin_layers = ring_connect_to_pad_layers, nets = []) 570 | pdngen.makeStrap(grid = g, layer = m5, width = design.micronToDBU(1.2), 571 | spacing = design.micronToDBU(1.2), pitch = design.micronToDBU(6), offset = design.micronToDBU(0), 572 | number_of_straps = 0, snap = True, starts_with = pdn.GRID, extend = pdn.RINGS, nets = []) 573 | pdngen.makeStrap(grid = g, layer = m6, width = design.micronToDBU(1.2), 574 | spacing = design.micronToDBU(1.2), pitch = design.micronToDBU(6), offset = design.micronToDBU(0), 575 | number_of_straps = 0, snap = True, starts_with = pdn.GRID, extend = pdn.RINGS, nets = []) 576 | pdngen.makeConnect(grid = g, layer0 = m4, layer1 = m5, 577 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 578 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 579 | pdngen.makeConnect(grid = g, layer0 = m5, layer1 = m6, 580 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 581 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 582 | pdngen.makeConnect(grid = g, layer0 = m6, layer1 = m7, 583 | cut_pitch_x = pdn_cut_pitch[0], cut_pitch_y = pdn_cut_pitch[1], vias = [], techvias = [], 584 | max_rows = 0, max_columns = 0, ongrid = [], split_cuts = dict(), dont_use_vias = "") 585 | 586 | pdngen.checkSetup() 587 | pdngen.buildGrids(False) 588 | pdngen.writeToDb(True, "") 589 | pdngen.resetShapes() 590 | ``` 591 | Global Routing 592 | ``` 593 | signal_low_layer = design.getTech().getDB().getTech().findLayer("M1").getRoutingLevel() 594 | signal_high_layer = design.getTech().getDB().getTech().findLayer("M6").getRoutingLevel() 595 | clk_low_layer = design.getTech().getDB().getTech().findLayer("M1").getRoutingLevel() 596 | clk_high_layer = 
design.getTech().getDB().getTech().findLayer("M6").getRoutingLevel() 597 | grt = design.getGlobalRouter() 598 | grt.setMinRoutingLayer(signal_low_layer) 599 | grt.setMaxRoutingLayer(signal_high_layer) 600 | grt.setMinLayerForClock(clk_low_layer) 601 | grt.setMaxLayerForClock(clk_high_layer) 602 | grt.setAdjustment(0.5) 603 | grt.setVerbose(True) 604 | grt.globalRoute(True) 605 | design.getBlock().writeGuides("%s.guide"%designName) 606 | design.evalTclString("estimate_parasitics -global_routing") 607 | ``` 608 | Detailed Routing 609 | ``` 610 | import drt 611 | 612 | drter = design.getTritonRoute() 613 | params = drt.ParamStruct() 614 | params.outputMazeFile = "" 615 | params.outputDrcFile = "" 616 | params.outputCmapFile = "" 617 | params.outputGuideCoverageFile = "" 618 | params.dbProcessNode = "" 619 | params.enableViaGen = True 620 | params.drouteEndIter = 1 621 | params.viaInPinBottomLayer = "" 622 | params.viaInPinTopLayer = "" 623 | params.orSeed = -1 624 | params.orK = 0 625 | params.bottomRoutingLayer = "M1" 626 | params.topRoutingLayer = "M6" 627 | params.verbose = 1 628 | params.cleanPatches = True 629 | params.doPa = True 630 | params.singleStepDR = False 631 | params.minAccessPoints = 1 632 | params.saveGuideUpdates = False 633 | drter.setParams(params) 634 | drter.main() 635 | ``` 636 | Static IR drop Analysis 637 | ``` 638 | psm_obj = design.getPDNSim() 639 | psm_obj.setNet(ord.Tech().getDB().getChip().getBlock().findNet("VDD")) 640 | design.evalTclString(f"psm::set_corner [sta::cmd_corner]") 641 | psm_obj.analyzePowerGrid('', False, '', '') 642 | drops = psm_obj.getIRDropForLayer(tech.getDB().getTech().findLayer("M2")) 643 | ``` 644 | Gate Sizing 645 | ``` 646 | timing.makeEquivCells() 647 | # First pick an instance 648 | inst = block.findInst("cellName") 649 | # Then get the library cell information 650 | instMaster = inst.getMaster() 651 | equivCells = timing.equivCells(instMaster) 652 | # Perform gate sizing with randomly select an available 
equivalent cell 653 | inst.swapMaster(equivCells[0]) 654 | ``` 655 | ## Other necessary steps 656 | Set Unit RC Value of Layers 657 | ``` 658 | design.evalTclString("set_layer_rc -layer M1 -resistance 1.3889e-01 -capacitance 1.1368e-01") 659 | # M2 and the rest of the layers can follow the same method 660 | ``` 661 | Connect VDD/VSS pins to nets 662 | ``` 663 | import odb 664 | 665 | # Find the VDD net 666 | VDDNet = design.getBlock().findNet("VDD") 667 | # Create VDD net if it does not exist 668 | if VDDNet is None: 669 | VDDNet = odb.dbNet_create(design.getBlock(), "VDD") 670 | # Raise the special flag of the VDD net 671 | VDDNet.setSpecial() 672 | # Assign the "VDD" net to the "POWER" type 673 | VDDNet.setSigType("POWER") 674 | # Find the VSS net 675 | VSSNet = design.getBlock().findNet("VSS") 676 | # Create VSS net if it does not exist 677 | if VSSNet is None: 678 | VSSNet = odb.dbNet_create(design.getBlock(), "VSS") 679 | # Raise the special flag of the VSS net 680 | VSSNet.setSpecial() 681 | # Assign the "VSS" net to the "GROUND" type 682 | VSSNet.setSigType("GROUND") 683 | # Connect the pins to the nets 684 | design.getBlock().addGlobalConnect(None, ".*", "VDD", VDDNet, True) 685 | design.getBlock().addGlobalConnect(None, ".*", "VSS", VSSNet, True) 686 | # Establish global connect 687 | design.getBlock().globalConnect() 688 | ``` 689 | Set the clock signal 690 | ``` 691 | # Create clock signal 692 | design.evalTclString("create_clock -period "period in ps" [get_ports "portName"] -name "clockName"") 693 | # Propagate the clock signal 694 | design.evalTclString("set_propagated_clock [all_clocks]") 695 | ``` 696 | 697 | 698 | 699 | 700 | 701 | 702 | 703 | -------------------------------------------------------------------------------- /session1/demo1_flow.py: -------------------------------------------------------------------------------- 1 | #BSD 3-Clause License 2 | # 3 | #Copyright (c) 2023, ASU-VDA-Lab 4 | # 5 | #Redistribution and use in source and binary 
forms, with or without 6 | #modification, are permitted provided that the following conditions are met: 7 | # 8 | #1. Redistributions of source code must retain the above copyright notice, this 9 | # list of conditions and the following disclaimer. 10 | # 11 | #2. Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | #3. Neither the name of the copyright holder nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import openroad as ord
from openroad import Tech, Design
import os, odb, drt
from pathlib import Path
from demo1_helpers import load_design
import argparse

############
#Read Files#
############

parser = argparse.ArgumentParser(description="Path to root of the tutorial directory")
parser.add_argument("--path", type = Path, default='./', action = 'store')
pyargs = parser.parse_args()
# load_design reads the libs/LEFs, the gcd netlist, and creates "core_clock".
tech, design = load_design(pyargs.path, verilog = True)

###############
#Floorplanning#
###############
# 45x45 um die; the core is inset 5 um on every side.
floorplan = design.getFloorplan()
die_area = odb.Rect(design.micronToDBU(0), design.micronToDBU(0), design.micronToDBU(45), design.micronToDBU(45))
core_area = odb.Rect(design.micronToDBU(5), design.micronToDBU(5), design.micronToDBU(40), design.micronToDBU(40))
floorplan.initFloorplan(die_area, core_area)
floorplan.makeTracks()

############
#Place Pins#
############
design.getIOPlacer().addHorLayer(design.getTech().getDB().getTech().findLayer("metal8"))
design.getIOPlacer().addVerLayer(design.getTech().getDB().getTech().findLayer("metal7"))
design.getIOPlacer().run(True)

################
#Power Planning#
################
# Create the VDD/VSS special nets if absent, then hook every matching
# instance pin up to them via global connect.
VDD_net = design.getBlock().findNet("VDD")
if VDD_net is None:
  VDD_net = odb.dbNet_create(design.getBlock(), "VDD")
VDD_net.setSpecial()
VDD_net.setSigType("POWER")
design.getBlock().addGlobalConnect(None, ".*", "VDD", VDD_net, True)

VSS_net = design.getBlock().findNet("VSS")
if VSS_net is None:
  VSS_net = odb.dbNet_create(design.getBlock(), "VSS")
VSS_net.setSpecial()
VSS_net.setSigType("GROUND")
design.getBlock().addGlobalConnect(None, ".*", "VSS", VSS_net, True)

design.getBlock().globalConnect()

##################
#Global Placement#
##################
gpl = design.getReplace()
gpl.setTimingDrivenMode(False)
gpl.setRoutabilityDrivenMode(False)
gpl.setUniformTargetDensityMode(False)
gpl.setInitialPlaceMaxIter(20)
gpl.setTargetDensity(0.7)
gpl.setInitDensityPenalityFactor(0.001)
gpl.doInitialPlace()

####################
#Detailed Placement#
####################
site = design.getBlock().getRows()[0].getSite()
max_disp_x = int(design.micronToDBU(0) / site.getWidth())
max_disp_y = int(design.micronToDBU(0) / site.getHeight())
design.getOpendp().detailedPlacement(max_disp_x, max_disp_y, "", False)

######################
#Clock Tree Synthesis#
######################
# BUG FIX: the original evaluated "set_propagated_clock [core_clock]".
# core_clock is a clock *name*, not a Tcl command, so that string fails to
# evaluate; use [all_clocks] as demo1_helpers.load_design does.
design.evalTclString("set_propagated_clock [all_clocks]")
design.evalTclString("set_wire_rc -clock -resistance 3.574e-02 -capacitance 7.516e-02")
design.evalTclString("set_wire_rc -signal -resistance 3.574e-02 -capacitance 7.516e-02")

cts = design.getTritonCts()
parms = cts.getParms()
parms.setWireSegmentUnit(20)
cts.setBufferList("CLKBUF_X3")
cts.setRootBuffer("CLKBUF_X3")
cts.setSinkBuffer("CLKBUF_X1")
cts.runTritonCts()

####################
#Detailed Placement#
####################
# Re-legalize after CTS inserted the clock buffers.
site = design.getBlock().getRows()[0].getSite()
max_disp_x = int(design.micronToDBU(0) / site.getWidth())
max_disp_y = int(design.micronToDBU(0) / site.getHeight())
design.getOpendp().detailedPlacement(max_disp_x, max_disp_y, "", False)

################
#Global Routing#
################
signal_low_layer = design.getTech().getDB().getTech().findLayer("metal1").getRoutingLevel()
signal_high_layer = design.getTech().getDB().getTech().findLayer("metal8").getRoutingLevel()
clk_low_layer = design.getTech().getDB().getTech().findLayer("metal3").getRoutingLevel()
clk_high_layer = design.getTech().getDB().getTech().findLayer("metal8").getRoutingLevel()
grt = design.getGlobalRouter()
grt.setMinRoutingLayer(signal_low_layer)
grt.setMaxRoutingLayer(signal_high_layer)
grt.setMinLayerForClock(clk_low_layer)
grt.setMaxLayerForClock(clk_high_layer)
grt.setAdjustment(0.5)
grt.setVerbose(True)
grt.globalRoute(True)

##################
#Detailed Routing#
##################
drter = design.getTritonRoute()
params = drt.ParamStruct()
params.outputMazeFile = ""
params.outputDrcFile = ""
params.outputCmapFile = ""
params.outputGuideCoverageFile = ""
params.dbProcessNode = ""
params.enableViaGen = True
params.drouteEndIter = 1
params.viaInPinBottomLayer = ""
params.viaInPinTopLayer = ""
params.orSeed = -1
params.orK = 0
params.bottomRoutingLayer = "metal1"
params.topRoutingLayer = "metal8"
params.verbose = 1
params.cleanPatches = True
params.doPa = True
params.singleStepDR = False
params.minAccessPoints = 1
params.saveGuideUpdates = False
drter.setParams(params)
drter.main()

design.writeDef("tmp.def")
Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | #3. Neither the name of the copyright holder nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import openroad as ord
from openroad import Tech, Design
import os, odb, drt
from pathlib import Path

def load_design(demo_path, verilog = False):
  """Read the tutorial platform and the gcd design into a fresh Tech/Design.

  demo_path: Path to the tutorial root (contains platforms/ and designs/).
  verilog:   when True, read the Verilog netlist and link it; otherwise
             read the placed-and-routed DEF.
  Returns (tech, design).
  """
  tech = Tech()
  lib_dir = demo_path/"platforms/lib/"
  lef_dir = demo_path/"platforms/lef/"
  design_dir = demo_path/"designs/"
  # Technology data: Liberty timing libs first, then the tech LEF, then
  # the remaining (cell) LEFs.
  for lib_path in lib_dir.glob('*.lib'):
    tech.readLiberty(lib_path.as_posix())
  for tech_lef_path in lef_dir.glob('*tech.lef'):
    tech.readLef(tech_lef_path.as_posix())
  for lef_path in lef_dir.glob('*.lef'):
    tech.readLef(lef_path.as_posix())
  design = Design(tech)
  # Design data: netlist (Verilog) or layout (DEF).
  if verilog:
    verilog_path = design_dir/"gcd.v"
    design.readVerilog(f"{verilog_path}")
    design.link("gcd")
  else:
    def_path = design_dir/'gcd.def'
    design.readDef(f"{def_path}")
  # Constraints, then define and propagate the core clock.
  sdc_path = design_dir/"gcd.sdc.gz"
  design.evalTclString(f"read_sdc {sdc_path}")
  design.evalTclString("create_clock -period 20 [get_ports clk] -name core_clock")
  design.evalTclString("set_propagated_clock [all_clocks]")
  return tech, design
Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | #3. Neither the name of the copyright holder nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | import openroad as ord 30 | from openroad import Tech, Design, Timing 31 | import os, odb, drt 32 | from demo1_helpers import load_design 33 | from pathlib import Path 34 | import argparse 35 | 36 | parser = argparse.ArgumentParser(description="Path to root of the tutorial directory") 37 | parser.add_argument("--path", type = Path, default='./', action = 'store') 38 | pyargs = parser.parse_args() 39 | tech, design = load_design(pyargs.path, verilog = False) 40 | # tech, design = load_design(Path("../", verilog = False) # For demo to be copied. 
timing = Timing(design)
corner = timing.getCorners()[0]
block = design.getBlock()

############
#cell query#
############
# Report per-instance power for the last ten instances in the block.
print(" name | library_type | dynamic_power | static_power")
insts = block.getInsts()[-10:]
for inst in insts:
  inst_static_power = timing.staticPower(inst, corner)
  inst_dynamic_power = timing.dynamicPower(inst, corner)
  inst_name = inst.getName()
  libcell_name = inst.getMaster().getName()
  print(f"{inst_name:<11}| {libcell_name:<13}| {inst_dynamic_power:14.4e}| {inst_static_power:12.4e}")
  #hit tab for all available apis (ex. inst.[tab])
#the return type of power is float!!!
print(f"Power return type: {type(inst_static_power)}")
print("###################################################################")
###########
#net query#
###########
# Report pin+wire capacitance for the first ten nets.
print(" name | net_type | pin&wire_capacitance")
nets = block.getNets()[:10]
for net in nets:
  pin_and_wire_cap = timing.getNetCap(net, corner, timing.Max)
  net_name = net.getName()
  net_type = net.getSigType()
  print(f"{net_name:<12}| {net_type:<9}| {pin_and_wire_cap:19.4e}")
  #hit tab for all available apis (ex. net.[tab])
#the return type of pin_and_wire_cap is float!!!
print("###################################################################")
###########
#pin query#
###########
# Per-pin timing for the instance pins gathered above (supply pins skipped).
print(" name | rise_arrival_time | fall_arrival_time | rise_slack | fall_slack | slew")
for inst in insts:
  inst_ITerms = inst.getITerms()
  for pin in inst_ITerms:
    if design.isInSupply(pin):
      continue
    pin_name = design.getITermName(pin)
    pin_rise_arr = timing.getPinArrival(pin, timing.Rise)
    pin_fall_arr = timing.getPinArrival(pin, timing.Fall)
    # BUG FIX: rise_slack was queried with timing.Fall and fall_slack with
    # timing.Rise (edges swapped); query each slack with its matching edge.
    pin_rise_slack = timing.getPinSlack(pin, timing.Rise, timing.Max)
    pin_fall_slack = timing.getPinSlack(pin, timing.Fall, timing.Max)
    pin_slew = timing.getPinSlew(pin)
    print(f"{pin_name:<12} | {pin_rise_arr:17.4e} | {pin_fall_arr:17.4e} | {pin_rise_slack:10.4e} | {pin_fall_slack:10.4e} | {pin_slew:6.4e}")
    #hit tab for all available apis (ex. pin.[tab])
    #the return type of slack is float!!!
    #timing-related properties go through timing.[tab] apis
print("###################################################################")
print(" name | rise_arrival_time | fall_arrival_time | rise_slack | fall_slack | slew")
for net in nets:
  net_ITerms = net.getITerms()
  for pin in net_ITerms:
    pin_name = design.getITermName(pin)
    pin_rise_arr = timing.getPinArrival(pin, timing.Rise)
    pin_fall_arr = timing.getPinArrival(pin, timing.Fall)
    # BUG FIX: same swapped-edge defect as above.
    pin_rise_slack = timing.getPinSlack(pin, timing.Rise, timing.Max)
    pin_fall_slack = timing.getPinSlack(pin, timing.Fall, timing.Max)
    pin_slew = timing.getPinSlew(pin)
    print(f"{pin_name:<14} | {pin_rise_arr:17.4e} | {pin_fall_arr:<17.4e} | {pin_rise_slack:10.4e} | {pin_fall_slack:10.4e} | {pin_slew:6.4e}")
print("###################################################################")
-------------------------------------------------------------------------------- 1 | #BSD 3-Clause License 2 | # 3 | #Copyright (c) 2023, ASU-VDA-Lab 4 | # 5 | #Redistribution and use in source and binary forms, with or without 6 | #modification, are permitted provided that the following conditions are met: 7 | # 8 | #1. Redistributions of source code must retain the above copyright notice, this 9 | # list of conditions and the following disclaimer. 10 | # 11 | #2. Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | #3. Neither the name of the copyright holder nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import torch
import torch.nn as nn
import numpy as np
from demo2_IR_helpers import OpenROAD_map_creation, load_design
from demo2_IR_helpers import UNet
import argparse
from openroad import Tech, Design, Timing
from pathlib import Path

if __name__ == "__main__":
  #############
  #load design#
  #############
  arg_parser = argparse.ArgumentParser(description="path of your CircuitOps clone (must include /CircuitOps)")
  arg_parser.add_argument("--path", type = Path, default='./', action = 'store')
  cli_args = arg_parser.parse_args()
  tech_design, design = load_design(cli_args.path)

  timing = Timing(design)
  corner = timing.getCorners()[0]
  ############
  #load model#
  ############
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  model = UNet()
  #################
  #get feature map#
  #################
  # Each spec is (map_type, layer index); -1 means "no layer argument".
  # Congestion is sampled at tech-layer indices 2/4/6 (metal 1/2/3).
  feature_specs = [
    ("static_power", -1),
    ("dynamic_power", -1),
    ("congestion", 2),
    ("congestion", 4),
    ("congestion", 6),
  ]
  feature_tensors = []
  for map_name, layer_index in feature_specs:
    row_height, track_width, raw_map = OpenROAD_map_creation(map_name, tech_design, design, corner, layer_index, timing)
    # Add batch and channel dims: (H, W) -> (1, 1, H, W).
    feature_tensors.append(torch.Tensor(raw_map).unsqueeze(0).unsqueeze(0))
  ################
  #get golden map#
  ################
  row_height, track_width, golden_map = OpenROAD_map_creation("static_IR", tech_design, design, corner, -1, timing)
  static_IR_map = torch.Tensor(golden_map)

  output_tensor = model(feature_tensors).squeeze()

  output_array = output_tensor.squeeze().detach().numpy()

  # Compare the (untrained) prediction against the golden static-IR map.
  loss = nn.L1Loss()(static_IR_map, output_tensor)

  print(f"L1 Loss: {loss.item():7.5f}")

  print("#################Done#################")
def OpenROAD_map_creation(map_type, tech_design, design, corner, congestion_layer, timing):
  """Rasterize one design feature into a 2-D map over the core area.

  map_type selects the feature:
    "static_power" / "dynamic_power" -- per-instance power density,
    "congestion"                     -- usage - capacity of the tech layer at
                                        index congestion_layer,
    "static_IR"                      -- PDNSim static IR drop on the VDD net.
  Each pixel is one routing track wide and one site row tall.

  Returns (row_height, track_width, feature_map); feature_map is a numpy
  array indexed [x][y].
  """
  block = ord.get_db_block()
  insts = block.getInsts()
  ###############
  #get core bbox#
  ###############
  core_x0 = block.getCoreArea().xMin()
  core_y0 = block.getCoreArea().yMin()
  core_x1 = block.getCoreArea().xMax()
  core_y1 = block.getCoreArea().yMax()
  ################
  #get row height#
  ################
  row_height = block.getRows()[1].getOrigin()[1] - block.getRows()[0].getOrigin()[1]
  #################
  #get track width#
  #################
  track_width = block.getTrackGrids()[0].getGridX()[1] - block.getTrackGrids()[0].getGridX()[0]
  ################
  #get gcell grid#
  ################
  gcell_grid_x = block.getGCellGrid().getGridX()
  gcell_grid_x_delta = gcell_grid_x[1] - gcell_grid_x[0]
  gcell_grid_y = block.getGCellGrid().getGridY()
  gcell_grid_y_delta = gcell_grid_y[1] - gcell_grid_y[0]
  #########################
  #generate feature map(s)#
  #########################
  # One pixel per (track, row); round up so the core edges stay covered.
  image_width = (core_x1 - core_x0) // track_width
  if (core_x1 - core_x0) % track_width > 0:
    image_width += 1
  image_height = (core_y1 - core_y0) // row_height
  if (core_y1 - core_y0) % row_height > 0:
    image_height += 1
  ###############################
  #assign congestion and IR drop#
  ###############################
  if map_type == "static_IR" or map_type == "congestion":
    db_tech = tech_design.getDB().getTech()
    if map_type == "static_IR":
      feature_map = np.full((image_width, image_height), 0.0)
      #run pdn analysis#
      psm_obj = design.getPDNSim()
      psm_obj.setNet(ord.Tech().getDB().getChip().getBlock().findNet("VDD"))
      design.evalTclString(f"psm::set_corner [sta::cmd_corner]")
      psm_obj.analyzePowerGrid('', False, '', '')
      #extract value: keep the worst (max) drop seen in each pixel#
      layers = db_tech.getLayers()
      drops = psm_obj.getIRDropForLayer(layers[2])
      for pt, v in drops.items():
        if pt.x() < core_x0 or pt.x() > core_x1 or pt.y() < core_y0 or pt.y() > core_y1:
          continue
        # Points sitting exactly on the max edge are clamped into the last pixel.
        anchor_x = core_x1 - 1 if pt.x() == core_x1 else pt.x()
        anchor_y = core_y1 - 1 if pt.y() == core_y1 else pt.y()
        if v > feature_map[(anchor_x - core_x0)//track_width][(anchor_y - core_y0)//row_height]:
          feature_map[(anchor_x - core_x0)//track_width][(anchor_y - core_y0)//row_height] = v
      return row_height, track_width, feature_map
    else:
      feature_map = np.full((image_width, image_height), -np.inf)
      layers = db_tech.getLayers()
      layer = layers[congestion_layer]
      min_ = np.inf
      for x in range(len(gcell_grid_x)):
        for y in range(len(gcell_grid_y)):
          capacity = block.getGCellGrid().getHorizontalCapacity(layer, x, y)
          usage = block.getGCellGrid().getHorizontalUsage(layer, x, y)
          # Layers with no horizontal tracks report 0 capacity; fall back
          # to the vertical direction.
          if capacity == 0:
            capacity = block.getGCellGrid().getVerticalCapacity(layer, x, y)
            usage = block.getGCellGrid().getVerticalUsage(layer, x, y)
          congestion = usage - capacity
          min_ = min(min_, congestion)
          # Skip gcells lying entirely outside the core.
          if gcell_grid_x[x] < core_x0:
            # BUG FIX: original tested
            #   gcell_grid_x[x] - core_x0 + gcell_grid_x_delta < core_x0
            # -- a spurious "- core_x0" the matching y guard does not have.
            if gcell_grid_x[x] + gcell_grid_x_delta < core_x0:
              continue
          if gcell_grid_y[y] < core_y0:
            if gcell_grid_y[y] + gcell_grid_y_delta < core_y0:
              continue
          if gcell_grid_x[x] >= core_x1 or gcell_grid_y[y] >= core_y1:
            continue

          anchor_x = (gcell_grid_x[x] - core_x0)//track_width if gcell_grid_x[x] - core_x0 >= 0 else 0
          anchor_y = (gcell_grid_y[y] - core_y0)//row_height if gcell_grid_y[y] - core_y0 >= 0 else 0

          # One gcell spans several pixels; paint the max congestion into each.
          for delta_x in range(math.ceil(gcell_grid_x_delta/track_width)):
            for delta_y in range(math.ceil(gcell_grid_y_delta/row_height)):
              if anchor_x + delta_x >= feature_map.shape[0] or anchor_y + delta_y >= feature_map.shape[1]:
                continue
              if congestion > feature_map[int(anchor_x + delta_x)][int(anchor_y + delta_y)]:
                feature_map[int(anchor_x + delta_x)][int(anchor_y + delta_y)] = congestion
      # Pixels touched by no gcell get the global minimum congestion.
      for x in range(len(feature_map)):
        for y in range(len(feature_map[0])):
          if feature_map[x][y] == -np.inf:
            feature_map[x][y] = min_
      return row_height, track_width, feature_map
  ################################################################################
  #assign static_power and dynamic_power value by iterating through each instance#
  ################################################################################
  if map_type == "static_power" or map_type == "dynamic_power":
    feature_map = np.full((image_width, image_height), 0.0)
    for inst in insts:
      ###############
      #get cell bbox#
      ###############
      inst_x0 = inst.getBBox().xMin()
      inst_y0 = inst.getBBox().yMin()
      inst_x1 = inst.getBBox().xMax()
      inst_y1 = inst.getBBox().yMax()

      anchor_index_x = (inst_x0 - core_x0) // track_width
      anchor_index_y = (inst_y0 - core_y0) // row_height
      #############
      #get feature#
      #############
      # Power density: total power spread uniformly over the cell area.
      if map_type == "static_power":
        feature = timing.staticPower(inst, corner)
      else:
        feature = timing.dynamicPower(inst, corner)
      feature /= ((inst_x1 - inst_x0) * (inst_y1 - inst_y0))
      ###################################################
      #compute the amount of pixels covered by this cell#
      #in case there are non-interger-track-width cells #
      #or off track DRC                                 #
      ###################################################
      covered_horizon_pixel_cnt = (inst_x1 - inst_x0) // track_width
      if (inst_x0 - core_x0) % track_width > 0:
        covered_horizon_pixel_cnt += 1
      if (inst_x1 - core_x0) % track_width > 0:
        covered_horizon_pixel_cnt += 1
      #############################################
      #in case there are non-interger-height cells#
      #############################################
      covered_vertical_pixel_cnt = (inst_y1 - inst_y0) // row_height
      if (inst_y0 - core_y0) % row_height > 0:
        covered_vertical_pixel_cnt += 1
      # BUG FIX: this second test repeated inst_y0 (copy-paste); the
      # horizontal mirror tests the far edge (inst_x1), so test inst_y1.
      if (inst_y1 - core_y0) % row_height > 0:
        covered_vertical_pixel_cnt += 1
      ##############
      #assign value#
      ##############
      # Weight the power density by the cell area falling in each pixel.
      # NOTE(review): the modulo below is on absolute DB coords, which
      # assumes the core origin is aligned to the row/track grid -- confirm.
      for y in range(covered_vertical_pixel_cnt):
        for x in range(covered_horizon_pixel_cnt):

          if y == 0 and y == covered_vertical_pixel_cnt - 1:
            tmp_height = row_height
          elif y == 0 and y != covered_vertical_pixel_cnt - 1:
            tmp_height = row_height - (inst_y0 % row_height)
          elif y != 0 and y == covered_vertical_pixel_cnt - 1:
            tmp_height = inst_y1 % row_height
            if tmp_height == 0:
              tmp_height = row_height
          else:
            tmp_height = row_height

          if x == 0 and x == covered_horizon_pixel_cnt - 1:
            tmp_width = inst_x1 - inst_x0
          elif x == 0 and x != covered_horizon_pixel_cnt - 1:
            tmp_width = track_width - (inst_x0 % track_width)
          elif x != 0 and x == covered_horizon_pixel_cnt - 1:
            tmp_width = inst_x1 % track_width
            if tmp_width == 0:
              tmp_width = track_width
          else:
            tmp_width = track_width

          cover_area = tmp_height * tmp_width
          feature_map[anchor_index_x + x][anchor_index_y + y] += feature * cover_area
    return row_height, track_width, feature_map
covered_vertical_pixel_cnt -1: 203 | tmp_height = row_height - (inst_y0 % row_height) 204 | elif y != 0 and y == covered_vertical_pixel_cnt -1: 205 | tmp_height = inst_y1 % row_height 206 | if tmp_height == 0: 207 | tmp_height = row_height 208 | else: 209 | tmp_height = row_height 210 | 211 | if x == 0 and x == covered_horizon_pixel_cnt -1: 212 | tmp_width = inst_x1 - inst_x0 213 | elif x == 0 and x != covered_horizon_pixel_cnt -1: 214 | tmp_width = track_width - (inst_x0 % track_width) 215 | elif x != 0 and x == covered_horizon_pixel_cnt -1: 216 | tmp_width = inst_x1 % track_width 217 | if tmp_width == 0: 218 | tmp_width = track_width 219 | else: 220 | tmp_width = track_width 221 | 222 | cover_area = tmp_height * tmp_width 223 | feature_map[anchor_index_x + x][anchor_index_y + y] += feature * cover_area 224 | return row_height, track_width, feature_map 225 | 226 | def add_global_connection(design, *, 227 | net_name=None, 228 | inst_pattern=None, 229 | pin_pattern=None, 230 | power=False, 231 | ground=False, 232 | region=None): 233 | if net_name is None: 234 | utl.error(utl.PDN, 1501, "The net option for the " + 235 | "add_global_connection command is required.") 236 | 237 | if inst_pattern is None: 238 | inst_pattern = ".*" 239 | 240 | if pin_pattern is None: 241 | utl.error(utl.PDN, 1502, "The pin_pattern option for the " + 242 | "add_global_connection command is required.") 243 | 244 | net = design.getBlock().findNet(net_name) 245 | if net is None: 246 | net = odb.dbNet_create(design.getBlock(), net_name) 247 | 248 | if power and ground: 249 | utl.error(utl.PDN, 1551, "Only power or ground can be specified") 250 | elif power: 251 | net.setSpecial() 252 | net.setSigType("POWER") 253 | elif ground: 254 | net.setSpecial() 255 | net.setSigType("GROUND") 256 | 257 | # region = None 258 | if region is not None: 259 | region = design.getBlock().findRegion(region) 260 | if region is None: 261 | utl.error(utl.PDN, 1504, f"Region {region} not defined") 262 | 263 | 
    design.getBlock().addGlobalConnect(region, inst_pattern, pin_pattern, net, True)

def load_design(demo_path):
    """Read libs/LEFs/DEF/SDC/SPEF for the gcd design and global-route it.

    Returns (tech, design).
    """
    #Read Files
    tech = Tech()
    libDir = demo_path/"platforms/lib/"
    lefDir = demo_path/"platforms/lef/"
    designDir = demo_path/"designs/"
    # Read technology files
    libFiles = libDir.glob('*.lib')
    techLefFiles = lefDir.glob('*tech.lef')
    # NOTE(review): '*.lef' also matches the tech LEF globbed above, so that
    # file is read twice -- confirm OpenROAD tolerates the duplicate read.
    lefFiles = lefDir.glob('*.lef')
    for libFile in libFiles:
        tech.readLiberty(libFile.as_posix())
    for techLefFile in techLefFiles:
        tech.readLef(techLefFile.as_posix())
    for lefFile in lefFiles:
        tech.readLef(lefFile.as_posix())
    design = Design(tech)
    #Read design files
    defFile = designDir/'gcd.def'
    design.readDef(f"{defFile}")
    # Read the SDC file and set the clocks
    sdcFile = designDir/"gcd.sdc.gz"
    spefFile = designDir/"gcd.spef.gz"
    design.evalTclString(f"read_sdc {sdcFile}")
    design.evalTclString(f"read_spef {spefFile}")
    design.evalTclString("set_propagated_clock [all_clocks]")
    # Hook up power/ground so routing sees the PDN nets.
    add_global_connection(design, net_name="VDD", pin_pattern="VDD", power=True)
    add_global_connection(design, net_name="VSS", pin_pattern="VSS", ground=True)
    odb.dbBlock.globalConnect(ord.get_db_block())
    design.getGlobalRouter().globalRoute()
    return tech, design

class UNet(nn.Module):
    """4-level U-Net over 5 input feature maps, predicting 1 output channel."""
    def __init__(self):
        super(UNet, self).__init__()

        # encoder: 5 input channels (one per feature map)
        self.enc_conv1 = nn.Conv2d(5, 64, kernel_size=3, padding='same')
        self.enc_conv2 = nn.Conv2d(64, 128, kernel_size=3, padding='same')
        self.enc_conv3 = nn.Conv2d(128, 256, kernel_size=3, padding='same')
        self.enc_conv4 = nn.Conv2d(256, 512, kernel_size=3, padding='same')

        # decoder convs consume [upsampled ; skip] concatenations
        self.dec_conv1 = nn.Conv2d(512, 256, kernel_size=3, padding='same')
        self.dec_conv2 = nn.Conv2d(256, 128, kernel_size=3, padding='same')
        self.dec_conv3 = nn.Conv2d(128, 64, kernel_size=3, padding='same')
        # final stage: 5 upsampled channels + the 5 original input channels
        self.dec_conv4 = nn.Conv2d(10, 1, kernel_size=3, padding='same')

        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.upconv1 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.upconv2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.upconv3 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.upconv4 = nn.ConvTranspose2d(64, 5, kernel_size=2, stride=2)

    def forward(self, x0):
        # x0: list/tuple of feature maps concatenated along the channel dim
        # -- presumably 5 single-channel (N,1,H,W) maps; TODO confirm caller.
        x0 = torch.cat(x0, dim=1)
        x1 = self.pool(nn.functional.relu(self.enc_conv1(x0)))
        x2 = self.pool(nn.functional.relu(self.enc_conv2(x1)))
        x3 = self.pool(nn.functional.relu(self.enc_conv3(x2)))
        x4 = self.pool(nn.functional.relu(self.enc_conv4(x3)))

        # decoder: upsample, zero-pad to the skip tensor's size, concat, conv
        x = nn.functional.relu(self.upconv1(x4))
        x = torch.cat([F.pad(x,(0,x3.size()[-1] - x.size()[-1],0,x3.size()[-2] - x.size()[-2]),'constant',0), x3], dim=1)
        x = nn.functional.relu(self.dec_conv1(x))

        x = nn.functional.relu(self.upconv2(x))
        x = torch.cat([F.pad(x,(0,x2.size()[-1] - x.size()[-1],0,x2.size()[-2] - x.size()[-2]),'constant',0), x2], dim=1)
        x = nn.functional.relu(self.dec_conv2(x))

        x = nn.functional.relu(self.upconv3(x))
        x = torch.cat([F.pad(x,(0,x1.size()[-1] - x.size()[-1],0,x1.size()[-2] - x.size()[-2]),'constant',0), x1], dim=1)
        x = nn.functional.relu(self.dec_conv3(x))

        x = nn.functional.relu(self.upconv4(x))
        x = torch.cat([F.pad(x,(0,x0.size()[-1] - x.size()[-1],0,x0.size()[-2] - x.size()[-2]),'constant',0), x0], dim=1)
        x = self.dec_conv4(x)

        return x
--------------------------------------------------------------------------------
/session1/demo2_gate_sizing.py:
--------------------------------------------------------------------------------
#BSD 3-Clause License
#
#Copyright (c) 2023, The Regents of the University of Minnesota
#
#All rights reserved.
6 | # 7 | #Redistribution and use in source and binary forms, with or without 8 | #modification, are permitted provided that the following conditions are met: 9 | # 10 | #* Redistributions of source code must retain the above copyright notice, this 11 | # list of conditions and the following disclaimer. 12 | # 13 | #* Redistributions in binary form must reproduce the above copyright notice, 14 | # this list of conditions and the following disclaimer in the documentation 15 | # and/or other materials provided with the distribution. 16 | # 17 | #* Neither the name of the copyright holder nor the names of its 18 | # contributors may be used to endorse or promote products derived from 19 | # this software without specific prior written permission. 20 | # 21 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#############################################################################
# demo2_gate_sizing.py preamble: CLI parsing, device selection, and loading #
# of the preprocessed cell dictionary.                                      #
#############################################################################
import faulthandler
faulthandler.enable()

import os
import torch
import torch.optim as optim
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
from collections import namedtuple
import dgl
import math
from tqdm import tqdm, trange
from time import time
import json
import sys
import openroad as ord
from openroad import Tech, Design, Timing
import copy
from pathlib import Path


# NOTE: the wildcard import also supplies the tuning constants used below
# (LR, BUF_SIZE, EPISODE, CLKset, n_features, ...) -- presumably defined in
# the helpers module; confirm there.
from demo2_gate_sizing_helpers import *

import argparse
###############
#path argument#
###############
# argparse applies `type=Path` to the string default too, so pyargs.path is
# always a pathlib.Path.
parser = argparse.ArgumentParser(description="path of your ASPDAC2024-Tutorial clone (must include /ASPDAC2024-Tutorial)")
parser.add_argument("--path", type = Path, default='./', action = 'store')
pyargs = parser.parse_args()
###################
#set up matplotlib#
###################
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
##################################
#use gpu or cpu(cpu for tutorial)#
##################################
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
####################################################
#load cell dictionary with name and size properties#
####################################################
with open(pyargs.path/'platforms/preprocessed_cell_dictionary.json', 'r', encoding='utf-8') as f:
    cell_dict = json.load(f)
###################################################
#create a lookup table for the index from the name#
###################################################
cell_name_dict = {v['name']: k for k, v in cell_dict.items()}
########################################
#load design using openroad python apis#
######################################## 89 | ord_tech, ord_design, timing, db, chip, block, nets = load_design(pyargs.path) 90 | ################################################################################ 91 | #srcs, dsts : source and destination instances for the graph function. # 92 | #inst_dict : Dictionary that stores all the properties of the instances. # 93 | #fanin_dict, fanout_dict : Dictionary that keeps a stores the fanin and # 94 | # fanout of the instances in an easily indexable way. # 95 | #endpoints : Storing all the endpoints(here they are flipflops) # 96 | ################################################################################ 97 | inst_dict, endpoints, srcs, dsts, fanin_dict, fanout_dict = \ 98 | iterate_nets_get_properties(ord_design, timing, nets, block, cell_dict, cell_name_dict) 99 | ################################################ 100 | #quick lookup for the instance name from the ID# 101 | ################################################ 102 | inst_names = {v['idx']:k for k,v in inst_dict.items()} 103 | # create DGL graph 104 | G = dgl.graph((srcs+dsts,dsts+srcs)) 105 | # store the featues for cell types, slack, slew, load, area, and max_size_index(for validity checks) 106 | G.ndata['cell_types'] = torch.tensor([ inst_dict[x]['cell_type'] for x in inst_names.values() ]) 107 | G.ndata['slack'] = torch.tensor( 108 | [ inst_dict[x]['slack'] for x in inst_names.values() ]) 109 | G.ndata['slew'] = torch.tensor( 110 | [ inst_dict[x]['slew'] for x in inst_names.values() ]) 111 | G.ndata['load'] = torch.tensor( 112 | [ inst_dict[x]['load'] for x in inst_names.values() ]) 113 | G.ndata['area'] = torch.tensor([ inst_dict[x]['area'] for x in inst_names.values() ]) 114 | G.ndata['max_size'] = torch.tensor([cell_dict[str(inst_dict[x]['cell_type'][0])]['n_sizes'] for x in inst_names.values()]) 115 | G.edata['types'] = torch.cat((torch.zeros(len(srcs),dtype=torch.long),torch.ones(len(dsts),dtype=torch.long)),0) 116 | # normalization 
################################################################################
# Normalization parameters: scale graph features into ~[0,1] so the DQN input #
# is well-conditioned.                                                         #
################################################################################
norm_data = {
    'max_area' : 1.0*np.max(G.ndata['area'].numpy()),
    'clk_period' : CLKset[0],   # CLKset comes from the helpers wildcard import
    'max_slew' : 1.0*np.max(G.ndata['slew'].numpy()),
    'max_load' : 1.0*np.max(G.ndata['load'].numpy()),
}
G.ndata['area'] = G.ndata['area']/norm_data['max_area']
G.ndata['slack'] = G.ndata['slack']/norm_data['clk_period']
# Clip normalized slack at 1 (vectorized in-place clamp replaces the original
# per-element Python loop; identical result) and map remaining +/-inf values
# (unconstrained pins) to 1 as well.
G.ndata['slack'].clamp_(max=1.0)
G.ndata['slack'][torch.isinf(G.ndata['slack'])] = 1
G.ndata['slew'] = G.ndata['slew']/norm_data['max_slew']
G.ndata['load'] = G.ndata['load']/norm_data['max_load']


# Total area in square microns before any resizing (for final comparison).
inital_total_area = torch.sum(G.ndata['area'])*norm_data['max_area']/(unit_micron*unit_micron)
print(f"Initial total area: {inital_total_area:.4f}")
G = G.to(device)

print("Number of Nodes:",G.num_nodes())
print("Number of Edges:",G.num_edges())
print("##############################################")
Transition = namedtuple('Transition',
                        ('graph', 'action', 'next_state', 'reward'))

# Number of distinct cell types; a state vector = one-hot cell type + scalars.
n_cells = max(int(x) for x in cell_dict.keys()) + 1
n_state = n_cells + n_features

###############################################################
#Give an intial solution proportionaly to the original slacks.#
###############################################################
Slack_Lambda = 1-G.ndata['slack'].to('cpu')
#############################################################################
#Create the target and policy nets and ensure that they have the same value.#
#############################################################################
policy_net = DQN(n_state, n_cells, n_features, device).to(device)
target_net = DQN(n_state, n_cells, n_features, device).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()

# RMSprop as in the classic DQN recipe; LR comes from the helpers module.
optimizer = optim.RMSprop(policy_net.parameters(), lr = LR)
####################
#Experience Storage#
####################
memory = ReplayMemory(BUF_SIZE)
steps_done = 0

loss_history = []
episode_durations = []

# Per-episode working copies; the originals are kept for env_reset().
episode_inst_dict = copy.deepcopy(inst_dict)
episode_G = copy.deepcopy(G)

print("Worst Negative slack initial:", torch.min(episode_G.ndata['slack']).numpy()*norm_data['clk_period'])
print("Total Negative slack initial:", torch.sum(torch.min(episode_G.ndata['slack'],torch.zeros_like(episode_G.ndata['slack']))).numpy())
print("##############################################")
episode_reward = []
update_loss = []
update_step = []

train_start_time = time()
###############################
#no data points               #
#not inference in the tutorial#
###############################
# `inference` is presumably provided by the helpers wildcard import -- confirm.
if inference:
    MAX_STEPS = 75
    min_slack_plot = torch.min(episode_G.ndata['slack']*norm_data['clk_period']).item()
    num_episodes = 1
else:
    num_episodes = EPISODE

pareto_points = []
pareto_cells = []
best_cost = calc_cost(episode_G, Slack_Lambda)
reset_state = get_state_cells(episode_inst_dict, inst_names, cell_dict)
data_v_episode = []
################
#start training#
################
for i_episode in range(num_episodes):
    random_taken = 0
    total_taken = 0
    print("Episode :",i_episode)
    ######################################
    #Initialize the environment and state#
    ######################################
    episode_G, episode_inst_dict, working_clk = env_reset(reset_state, i_episode,\
        cell_name_dict, CLKset, ord_design, timing, G, inst_dict, CLK_DECAY, CLK_DECAY_STRT,\
        clk_init, clk_range, clk_final, inst_names, block, cell_dict, norm_data, device)
    best_cost = calc_cost(episode_G,
Slack_Lambda) 211 | cumulative_reward = 0 212 | count_bads = 0 213 | ############################ 214 | #get current WNS, TNS, area# 215 | ############################ 216 | old_WNS = torch.min(episode_G.ndata['slack']) 217 | old_TNS = torch.sum(torch.min(episode_G.ndata['slack'], torch.zeros_like(episode_G.ndata['slack']))) 218 | old_area = torch.sum(episode_G.ndata['area']) 219 | episode_TNS = old_TNS 220 | episode_WNS = old_WNS 221 | episode_area = old_area 222 | for t in trange(MAX_STEPS): 223 | ############################## 224 | #select and perform an action# 225 | ############################## 226 | critical_nodes = get_critical_path_nodes(episode_G, i_episode, TOP_N_NODES, n_cells) 227 | critical_graph = get_subgraph(episode_G, critical_nodes) 228 | state = get_state(critical_graph, n_state, n_cells, n_features) 229 | action, total_taken, steps_done, random_taken\ 230 | = select_action(critical_graph, inference, total_taken,\ 231 | steps_done, random_taken, policy_net,\ 232 | EPS_END, EPS_START, EPS_DECAY, device) 233 | if action == -1: 234 | ####### 235 | #reset# 236 | ####### 237 | cost = calc_cost(episode_G, Slack_Lambda) 238 | if cost 0: 258 | solution.append(i) 259 | sizes.append(int(episode_G.ndata['cell_types'][:,1].T[i])) 260 | if t >= MAX_STEPS: 261 | done = 1 262 | 263 | if reward < 0: 264 | count_bads += 1 265 | else: 266 | count_bads = 0 267 | ######################################### 268 | #stop this episode if it's getting worse# 269 | ######################################### 270 | #if count_bads >= STOP_BADS: 271 | # print("Stopping bad actions") 272 | # count_bads = 0 273 | # done = 1 274 | 275 | cumulative_reward += reward 276 | if done: 277 | next_state = None 278 | next_state_push =None 279 | else: 280 | next_state_push = next_state 281 | 282 | ############################# 283 | #store into the reply buffer# 284 | ############################# 285 | if not inference: 286 | memory.push(critical_graph.clone(), action,next_state_push, 
reward) 287 | # Perform one step of the optimization (on the target network) 288 | optimizer, loss_history = optimize_model(memory, BATCH_SIZE, device, GAMMA,\ 289 | policy_net, target_net, optimizer, loss_history) 290 | 291 | if next_state != None: 292 | new_slacks = np.array([x['slack'] for x in episode_inst_dict.values()])/norm_data['clk_period'] 293 | new_slacks[np.isinf(new_slacks)] = 1 294 | for i in range(len(new_slacks)): 295 | if new_slacks[i] > float(1): 296 | new_slacks[i] = 1 297 | new_slacks = np.minimum(new_slacks,np.zeros_like(new_slacks)) 298 | new_WNS = np.min(new_slacks) 299 | new_TNS = np.sum(new_slacks) 300 | if new_WNS >max_WNS: 301 | max_WNS = new_WNS 302 | 303 | if new_TNS >max_TNS: 304 | max_TNS = new_TNS 305 | 306 | working_clk_period = (working_clk - new_WNS*norm_data['clk_period']).item() 307 | if working_clk_period < min_working_clk: 308 | min_working_clk = working_clk_period 309 | 310 | working_area = new_area*norm_data['max_area']/(unit_micron*unit_micron) 311 | 312 | point_time = time() 313 | ret = pareto(pareto_points, pareto_cells, float(working_area),\ 314 | working_clk_period, episode_inst_dict, inst_names,\ 315 | cell_dict, inst_dict, block, ord_design, timing) 316 | if(ret == 1): 317 | l= len(pareto_points) 318 | slacks = [min_slack(block.findITerm(x + cell_dict[str(inst_dict[x]['cell_type'][0])]['out_pin']), timing) for x in inst_names.values()] 319 | cost = calc_cost(episode_G, Slack_Lambda) 320 | if cost episode_TNS: 324 | episode_TNS = new_TNS 325 | if new_WNS> episode_WNS: 326 | episode_WNS = new_WNS 327 | if new_area < episode_area: 328 | episode_area = new_area 329 | else: 330 | new_WNS = torch.FloatTensor([0]) 331 | new_TNS = torch.FloatTensor([0]) 332 | 333 | working_clk_period = (working_clk-(new_WNS)*norm_data['clk_period']).item() 334 | if t%10 == 9 : 335 | Slack_Lambda = update_lambda(Slack_Lambda, episode_G.ndata['slack'].to('cpu'), K) 336 | ############ 337 | #output log# 338 | ############ 339 | if t%MAX_STEPS == 
MAX_STEPS -1: 340 | print(f"WNS updated: {max_WNS:.4e}") 341 | print(f"TNS updated: {max_TNS:.4e}") 342 | #st = time() 343 | 344 | old_WNS = new_WNS 345 | old_TNS = new_TNS 346 | 347 | # Move to the next state 348 | if done: 349 | episode_durations.append(t + 1) 350 | break 351 | if (len(loss_history)+1) % TARGET_UPDATE == 0:#(i_episode) % TARGET_UPDATE 352 | if (i_episode < UPDATE_STOP): 353 | target_net.load_state_dict(policy_net.state_dict()) 354 | if len(loss_history)>0: 355 | update_loss.append(loss_history[-1]) 356 | update_step.append(len(loss_history)-1) 357 | data_v_episode.append((float(episode_TNS*norm_data['clk_period']),float(episode_WNS*norm_data['clk_period']), float(episode_area*norm_data['max_area'] ))) 358 | episode_reward.append(cumulative_reward.item()) 359 | critical_nodes = get_critical_path_nodes(episode_G, i_episode, TOP_N_NODES, n_cells) 360 | 361 | ############## 362 | #end training# 363 | ############## 364 | #print(time()) 365 | 366 | sorted_pareto_points = np.array(sorted(pareto_points, key=lambda x: x[0])) 367 | 368 | data_v_episode = np.array(data_v_episode) 369 | G.num_nodes() 370 | #print(max(episode_reward)) 371 | print("#################Done#################") 372 | -------------------------------------------------------------------------------- /session1/demo2_gate_sizing_helpers.py: -------------------------------------------------------------------------------- 1 | #BSD 3-Clause License 2 | # 3 | #Copyright (c) 2023, The Regents of the University of Minnesota 4 | # 5 | #All rights reserved. 6 | # 7 | #Redistribution and use in source and binary forms, with or without 8 | #modification, are permitted provided that the following conditions are met: 9 | # 10 | #* Redistributions of source code must retain the above copyright notice, this 11 | # list of conditions and the following disclaimer. 
12 | # 13 | #* Redistributions in binary form must reproduce the above copyright notice, 14 | # this list of conditions and the following disclaimer in the documentation 15 | # and/or other materials provided with the distribution. 16 | # 17 | #* Neither the name of the copyright holder nor the names of its 18 | # contributors may be used to endorse or promote products derived from 19 | # this software without specific prior written permission. 20 | # 21 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import openroad as ord
import pdn, odb, utl
from openroad import Tech, Design, Timing
from collections import defaultdict, namedtuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv, RelGraphConv
import random
from itertools import count
import dgl
import copy
from pathlib import Path

# replay memory
class ReplayMemory(object):
    """Fixed-capacity circular buffer of (graph, action, next_state, reward)."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # backing list, grows until `capacity`
        self.position = 0         # next write index (wraps around)
        self.Transition = namedtuple('Transition',
                    ('graph', 'action', 'next_state', 'reward'))

    # insert if not yet filled else treat it like a circular buffer and add.
    def push(self, *args):
        """Saves a transition."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = self.Transition(*args)
        self.position = (self.position + 1) % self.capacity

    # random sampling for the training step
    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)

class DQN(nn.Module):
    """3-layer relational graph conv net mapping node states to per-node
    (upsize, downsize) Q-values."""

    def __init__(self, n_state, n_cells, n_features, device):
        super(DQN, self).__init__()
        # RelGraphConv(in, out, num_relations); 2 edge relation types
        # matching G.edata['types'].
        self.conv1 = RelGraphConv(n_state,64,2)
        self.conv2 = RelGraphConv(64,64,2)
        self.conv3 = RelGraphConv(64,2,2)
        self.device = device
        self.n_state = n_state
        self.n_cells = n_cells
        self.n_features = n_features
    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
85 | def forward(self, graph, x=None): 86 | if x is None: 87 | x = get_state(graph, self.n_state, self.n_cells, self.n_features).to(self.device) 88 | e = graph.edata['types'] 89 | x = F.relu(self.conv1(graph, x, e)) 90 | x = F.relu(self.conv2(graph, x, e)) 91 | x = self.conv3(graph, x, e) 92 | # Get a list of actions that are not valid and ensure they cant be selected. 93 | mask = generate_masked_actions(graph) 94 | x = x*(~mask) + (mask)*(x.min() - 1) 95 | return x 96 | 97 | def get_type(cell_type, cell_dict, cell_name_dict): 98 | cell, drive = cell_type.split("_") 99 | drive = "_"+drive 100 | if cell in cell_name_dict: 101 | cell_values = cell_dict[cell_name_dict[cell]] 102 | if drive in cell_values['sizes']: 103 | idx = cell_values['sizes'].index(drive) 104 | return int(cell_name_dict[cell]), idx 105 | else: 106 | print("Drive strength "+drive+" not found in cell :"+cell) 107 | print("Possible sizes"+cell_values['sizes']) 108 | return None,None 109 | else: 110 | print("cell: "+cell+" not in dictionary") 111 | return None,None 112 | 113 | def pin_properties(dbpin, CLKset, ord_design, timing): 114 | ITerms = dbpin.getNet().getITerms() 115 | #slack 116 | slack = min(timing.getPinSlack(dbpin, timing.Fall, timing.Max), timing.getPinSlack(dbpin, timing.Rise, timing.Max)) 117 | if slack < -0.5*CLKset[0]: 118 | slack = 0 119 | #slew 120 | slew = timing.getPinSlew(dbpin) 121 | #load 122 | #Corners = timing.getCorners() 123 | load = 0 124 | for ITerm in ITerms: 125 | if ITerm.isInputSignal(): 126 | new_load = 0 127 | for corner in timing.getCorners(): 128 | tmp_load = timing.getPortCap(ITerm, corner, timing.Max) 129 | if tmp_load > new_load: 130 | new_load = tmp_load 131 | load += new_load 132 | 133 | return slack, slew, load 134 | 135 | def min_slack(dbpin, timing): 136 | slack = min(timing.getPinSlack(dbpin, timing.Fall, timing.Max), timing.getPinSlack(dbpin, timing.Rise, timing.Max)) 137 | return slack 138 | 139 | def generate_masked_actions(graph): 140 | # max size 
keep track of the index of the maximum size. 141 | # If the current size is maximum size we mask it out as an action 142 | upper_mask = graph.ndata['cell_types'][:,1] >= graph.ndata['max_size']-1 143 | lower_mask = graph.ndata['cell_types'][:,1] == 0 144 | # if the criteria for the mask is met we replace it with the minimum 145 | # to make sure that that action is never chosen 146 | mask = torch.cat((upper_mask.view(-1,1), lower_mask.view(-1,1)),1) 147 | return mask 148 | 149 | def update_lambda(initial_lambda, slacks, K): 150 | Slack_Lambda = initial_lambda * ((1-slacks)**K) 151 | return Slack_Lambda 152 | 153 | def optimize_model(memory, BATCH_SIZE, device, GAMMA, policy_net,\ 154 | target_net, optimizer, loss_history): 155 | if len(memory) < BATCH_SIZE: 156 | return optimizer, loss_history 157 | transitions = memory.sample(BATCH_SIZE) 158 | Transition = namedtuple('Transition', ('graph', 'action', 'next_state', 'reward')) 159 | batch = Transition(*zip(*transitions)) 160 | action_batch = torch.cat(batch.action) 161 | reward_batch = torch.cat(batch.reward) 162 | # Compute Q(s_t, a) - the model computes Q(s_t), then we select the 163 | # columns of actions taken. These are the actions which would've been taken 164 | # for each batch state according to policy_net 165 | state_action_values = torch.zeros(BATCH_SIZE, device=device,dtype=torch.float32) 166 | for n_state, graph in enumerate(batch.graph): 167 | actions = policy_net(graph) 168 | state_action_values[n_state] = actions.view(-1)[action_batch[n_state,0]] 169 | # Compute V(s_{t+1}) for all next states. 170 | # Expected values of actions for non_final_next_states are computed based 171 | # on the "older" target_net; selecting their best reward with max(1)[0]. 172 | # This is merged based on the mask, such that we'll have either the expected 173 | # state value or 0 in case the state was final. 
    next_state_values = torch.zeros(BATCH_SIZE, device=device,dtype=torch.float32)

    # Terminal transitions (next_state is None) keep value 0.
    for n_state, state in enumerate(batch.next_state):
        if state is not None:
            graph = batch.graph[n_state]
            state_g = state.to(device)
            next_state_values[n_state] = target_net(graph, state_g.view(graph.num_nodes(),-1)).max().detach()
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch

    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values.unsqueeze(1), expected_state_action_values.unsqueeze(1))

    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    loss_history.append(loss.item())
    optimizer.step()
    return optimizer, loss_history

def select_action(graph, inference = False, total_taken = False,\
                  steps_done = False, random_taken = False, policy_net = False,\
                  EPS_END = False, EPS_START = False, EPS_DECAY = False, device = False):
    # Epsilon-greedy action selection over the masked Q-values.  Returns
    # (action, total_taken, steps_done, random_taken); action == -1 signals
    # that every action is masked (caller resets the episode).
    total_taken +=1
    if inference:
        with torch.no_grad():
            action = policy_net(graph)
        return torch.argmax(action.view(-1)).view(1,1), total_taken, steps_done, random_taken

    sample = random.random()
    eps_threshold = EPS_END + (EPS_START - EPS_END) * 0.95**(steps_done / EPS_DECAY)
    steps_done += 1
    #get the mask
    mask = generate_masked_actions(graph)

    if int(sum(~mask.view(-1)))==0 :
        return -1, total_taken, steps_done, random_taken
    #Threshold keeps decreasing, so over time it takes more from the policy net.
    if sample > eps_threshold:
        with torch.no_grad():
            action = policy_net(graph)
        return torch.argmax(action.view(-1)).view(1,1), total_taken, steps_done, random_taken
    else:
        # random but valid: strictly positive scores only on unmasked entries
        action = torch.randn_like(mask,dtype=torch.float32)
        action = (action-action.min()+1)*(~mask)
        random_taken+=1
        return torch.tensor([[torch.argmax(action.view(-1))]], device=device, dtype=torch.long),\
               total_taken, steps_done, random_taken

def get_subgraph(graph, nodes):
    # 2-hop neighborhood subgraph around `nodes` (both edge directions).
    node_set = {x.item() for x in nodes}
    #level 1
    in_nodes, _ = graph.in_edges(list(node_set))
    _, out_nodes = graph.out_edges(list(node_set))
    node_set.update(in_nodes.tolist())
    node_set.update(out_nodes.tolist())
    #level 2
    in_nodes, _ = graph.in_edges(list(node_set))
    _, out_nodes = graph.out_edges(list(node_set))
    node_set.update(in_nodes.tolist())
    node_set.update(out_nodes.tolist())

    subgraph = dgl.node_subgraph(graph, list(node_set))

    return subgraph

def get_critical_path_nodes(graph, ep_num, TOP_N_NODES, n_cells):
    # Nodes with the worst negative slack; the pool grows 1% per episode.
    topk = min(len(graph.ndata['slack'])-1 , int(TOP_N_NODES*(1+0.01*ep_num)))
    min_slacks, critical_path = torch.topk(graph.ndata['slack'], topk, largest=False)
    critical_path = critical_path[min_slacks<0]

    # fall back to all nodes when nothing is failing timing
    if critical_path.numel() <=0:
        critical_path = torch.arange(0,graph.num_nodes())

    return critical_path

def get_state(graph, n_state, n_cells, n_features):
    # Per-node state: one-hot cell type scaled by size index, plus the
    # 4 scalar features in the trailing slots.
    state = torch.zeros(graph.num_nodes(), n_state)
    state[:,-1] = graph.ndata['area']
    state[:,-2] = graph.ndata['slack']
    state[:,-3] = graph.ndata['slew']
    state[:,-4] = graph.ndata['load']
    state[:,:-n_features] =F.one_hot(graph.ndata['cell_types'][:,0],n_cells)*graph.ndata['cell_types'][:,1:2]
    return state

def env_step(episode_G, graph, state, action, CLKset, ord_design, timing,\
             cell_dict,
norm_data, inst_names, episode_inst_dict, inst_dict,\ 260 | n_cells, n_features, block, device, Slack_Lambda, eps): 261 | next_state = state.clone() 262 | reward = 0 263 | done =0 264 | #based on the selected action you choose the approriate cell and upsize it or downsize 265 | cell_sub = int(action/2) 266 | cell = graph.ndata['_ID'][cell_sub].item() 267 | inst_name = inst_names[cell] 268 | cell_size = episode_inst_dict[inst_name]['cell_type'][1] 269 | cell_idx = episode_inst_dict[inst_name]['cell_type'][0] 270 | dbpin = block.findITerm(inst_name + cell_dict[str(inst_dict[inst_name]['cell_type'][0])]['out_pin']) 271 | old_slack = min_slack(dbpin, timing) 272 | o_master_name = cell_dict[str(cell_idx)]['name']+\ 273 | cell_dict[str(cell_idx)]['sizes'][cell_size] 274 | if(action%2 == 0): 275 | cell_size +=1 276 | else: 277 | cell_size -=1 278 | if(cell_size>=cell_dict[str(cell_idx)]['n_sizes']): 279 | print("Above max") 280 | print(action,cell_dict[str(cell_idx)]['n_sizes'], cell_idx, cell_size) 281 | if(cell_size<0): 282 | print("below min") 283 | print(action,cell_dict[str(cell_idx)]['n_sizes'], cell_idx, cell_size) 284 | episode_inst_dict[inst_name]['cell_type'] = (cell_idx,cell_size) 285 | size = cell_dict[str(cell_idx)]['sizesi'][cell_size] #actual size 286 | 287 | #one hot encode the relavant feature with the magnitude of size. 
288 | next_state[cell_sub,:-n_features] = F.one_hot(torch.tensor([cell_idx]),n_cells)*size 289 | episode_G.ndata['cell_types'][cell] = torch.tensor((cell_idx,cell_size)) 290 | 291 | #replace the master node in the code and find the new slack, 292 | inst = block.findInst(inst_name) 293 | n_master_name = cell_dict[str(cell_idx)]['name']+\ 294 | cell_dict[str(cell_idx)]['sizes'][cell_size] 295 | db = ord.get_db() 296 | n_master = db.findMaster(n_master_name) 297 | inst.swapMaster(n_master) 298 | dbpin = block.findITerm(inst_name + cell_dict[str(inst_dict[inst_name]['cell_type'][0])]['out_pin']) 299 | new_slack = min_slack(dbpin, timing) 300 | 301 | old_area = episode_inst_dict[inst_name]['area']/ norm_data['max_area'] 302 | episode_inst_dict[inst_name]['area']= n_master.getWidth() * n_master.getHeight() 303 | new_area = episode_inst_dict[inst_name]['area']/ norm_data['max_area'] 304 | episode_G.ndata['area'][cell] = new_area 305 | 306 | # update_area 307 | next_state[cell_sub,-1] = new_area 308 | reward += torch.tensor(old_area-new_area) 309 | old_slacks = torch.zeros(len(episode_inst_dict.keys())) 310 | new_slacks = torch.zeros(len(episode_inst_dict.keys())) 311 | new_slews = torch.zeros(len(episode_inst_dict.keys())) 312 | new_loads = torch.zeros(len(episode_inst_dict.keys())) 313 | 314 | for n, inst in inst_names.items(): 315 | old_slacks[n] = episode_inst_dict[inst]['slack'] 316 | tmp_db_pin = block.findITerm(inst+cell_dict[str(episode_inst_dict[inst]['cell_type'][0])]['out_pin']) 317 | if tmp_db_pin.getNet() != None: 318 | (episode_inst_dict[inst]['slack'], 319 | episode_inst_dict[inst]['slew'], 320 | episode_inst_dict[inst]['load']) = pin_properties(tmp_db_pin, CLKset, ord_design, timing) 321 | new_slacks[n] = episode_inst_dict[inst]['slack'] 322 | new_slews[n] = episode_inst_dict[inst]['slew'] 323 | new_loads[n] = episode_inst_dict[inst]['load'] 324 | episode_G.ndata['slack'] = new_slacks.to(device)/norm_data['clk_period'] 325 | for i in 
range(len(episode_G.ndata['slack'])): 326 | if episode_G.ndata['slack'][i] > 1: 327 | episode_G.ndata['slack'][i] = 1 328 | episode_G.ndata['slack'][torch.isinf(episode_G.ndata['slack'])] = 1 329 | episode_G.ndata['slew'] = new_slews.to(device)/ norm_data['max_slew'] 330 | episode_G.ndata['load'] = new_loads.to(device)/ norm_data['max_load'] 331 | 332 | next_state[:,-2] = torch.tensor([episode_inst_dict[inst_names[x.item()]]['slack'] 333 | for x in graph.ndata['_ID']])\ 334 | / norm_data['clk_period'] 335 | next_state[:,-3] = torch.tensor([episode_inst_dict[inst_names[x.item()]]['slew'] 336 | for x in graph.ndata['_ID']])\ 337 | / norm_data['max_slew'] 338 | next_state[:,-4] = torch.tensor([episode_inst_dict[inst_names[x.item()]]['load'] 339 | for x in graph.ndata['_ID']])\ 340 | / norm_data['max_load'] 341 | next_state[torch.isinf(next_state[:,-2]),-2] = 1 # remove infinity from primary outputs. hadle it better. 342 | 343 | #Check TNS 344 | new_TNS = torch.min(new_slacks,torch.zeros_like(new_slacks)) 345 | new_TNS = Slack_Lambda*new_TNS 346 | 347 | old_TNS = torch.min(old_slacks,torch.zeros_like(old_slacks)) 348 | old_TNS = Slack_Lambda*old_TNS 349 | 350 | factor = torch.max(torch.abs(0.1*old_TNS), eps*torch.ones_like(old_TNS)) 351 | factor = torch.max(torch.ones_like(old_TNS), 1/factor) 352 | reward += (torch.sum((new_TNS - old_TNS) * factor)).item() 353 | 354 | return reward, done, next_state, episode_inst_dict, episode_G 355 | 356 | def env_reset(reset_state = None, episode_num = None, cell_name_dict = None,\ 357 | CLKset = None, ord_design = None, timing = None, G = None,\ 358 | inst_dict = None, CLK_DECAY = None, CLK_DECAY_STRT = None,\ 359 | clk_init = None, clk_range = None, clk_final = None, inst_names = None,\ 360 | block = None, cell_dict = None, norm_data = None, device = None): 361 | episode_G = copy.deepcopy(G) 362 | episode_inst_dict = copy.deepcopy(inst_dict) 363 | 364 | if episode_num is not None: 365 | 366 | if episode_num 1: 414 | 
episode_G.ndata['slack'][i] = 1 415 | episode_G.ndata['slack'][torch.isinf(episode_G.ndata['slack'])] = 1 416 | episode_G.ndata['slew'] = new_slews.to(device)/ norm_data['max_slew'] 417 | episode_G.ndata['load'] = new_loads.to(device)/ norm_data['max_load'] 418 | 419 | return episode_G, episode_inst_dict, clk 420 | 421 | 422 | def calc_cost(ep_G, Slack_Lambda): 423 | cost = torch.sum(ep_G.ndata['area'].to('cpu')) 424 | x = ep_G.ndata['slack'].to('cpu') 425 | new_slacks = torch.min(x, torch.zeros_like(x)) 426 | cost += torch.sum(Slack_Lambda*(-new_slacks)) 427 | return cost 428 | 429 | def get_state_cells(ep_dict, inst_names, cell_dict): 430 | cells = [] 431 | for x in inst_names.values(): 432 | cell_size = ep_dict[x]['cell_type'][1] 433 | cell_idx = ep_dict[x]['cell_type'][0] 434 | cell_name = cell_dict[str(cell_idx)]['name']+\ 435 | cell_dict[str(cell_idx)]['sizes'][cell_size] 436 | cells.append(cell_name) 437 | return cells 438 | 439 | def pareto(pareto_points, pareto_cells, area, clk, ep_dict, inst_names,\ 440 | cell_dict, inst_dict, block, ord_design, timing): 441 | cells = get_state_cells(ep_dict, inst_names, cell_dict) 442 | if len(pareto_points) <= 0: 443 | pareto_points.append((area, clk)) 444 | pareto_cells.append(cells) 445 | return 1 446 | dominated_points = set() 447 | for n, pareto_point in enumerate(pareto_points): 448 | # if new point is dominated we skip 449 | if pareto_point[0] <= area and pareto_point[1] <= clk: 450 | return 0 451 | # if new point dominates any other point 452 | elif pareto_point[0] >= area and pareto_point[1] >= clk: 453 | dominated_points.add(n) 454 | 455 | pareto_points.append((area, clk)) 456 | pareto_cells.append(cells) 457 | pareto_points = [val for n, val in enumerate(pareto_points) if n not in dominated_points] 458 | pareto_cells = [val for n, val in enumerate(pareto_cells) if n not in dominated_points] 459 | slacks = [min_slack(block.findITerm(x + cell_dict[str(inst_dict[x]['cell_type'][0])]['out_pin']), timing) for x in 
inst_names.values()] 460 | test_sl = np.min(slacks) 461 | return 1 462 | 463 | def rmdir(directory): 464 | directory=Path(directory) 465 | for item in directory.iterdir(): 466 | if item.is_dir(): 467 | rmdir(directory) 468 | else: 469 | item.unlink() 470 | directory.rmdir() 471 | 472 | unit_micron = 2000 473 | design = 'pid' 474 | semi_opt_clk = '0.65' 475 | clock_name = "i_clk" 476 | CLK_DECAY=100; CLK_DECAY_STRT=25 477 | n_features = 4 478 | BATCH_SIZE = 64#128 479 | GAMMA = 0.99 480 | EPS_START = 0.99 481 | EPS_END = 0.05 482 | EPS_DECAY = 200 483 | LR = 0.001 484 | BUF_SIZE = 1500#10000 485 | STOP_BADS = 50 486 | MAX_STEPS = 50#150#200 c432#51#300 487 | TARGET_UPDATE = MAX_STEPS*25 #MAX_STEPS*5 488 | EPISODE = 2 #150 # c880 #50 c432 #15#200 489 | LOADTH = 0 490 | DELTA = 0.000001 491 | UPDATE_STOP = 250 492 | 493 | TOP_N_NODES = 100 494 | eps = 1e-5 495 | inference = False 496 | retrain = False 497 | max_TNS = -100 498 | max_WNS = -100 499 | count_bads = 0 500 | best_delay = 5 501 | min_working_clk = 100 502 | K = 4# set seprately to critical and non critical # accelration factor 503 | CLKset = [0.6] 504 | clk_final = CLKset[0] 505 | clk_range = 0.98*(float(semi_opt_clk) - CLKset[0]) 506 | clk_init = clk_final + clk_range 507 | 508 | 509 | def load_design(path): 510 | ord_tech = Tech() 511 | lib_file = path/"platforms/lib/NangateOpenCellLibrary_typical.lib" 512 | lef_file = path/"platforms/lef/NangateOpenCellLibrary.macro.lef" 513 | tech_lef_file = path/"platforms/lef/NangateOpenCellLibrary.tech.lef" 514 | ord_tech.readLiberty(lib_file.as_posix()) 515 | ord_tech.readLef(tech_lef_file.as_posix()) 516 | ord_tech.readLef(lef_file.as_posix()) 517 | ord_design = Design(ord_tech) 518 | timing = Timing(ord_design) 519 | design_file = path/f"designs/{design}_{semi_opt_clk}.v" 520 | ord_design.readVerilog(design_file.as_posix()) 521 | ord_design.link(design) 522 | ord_design.evalTclString("create_clock [get_ports i_clk] -name core_clock -period " + str(clk_init*1e-9)) 
523 | db = ord.get_db() 524 | chip = db.getChip() 525 | block = ord.get_db_block() 526 | nets = block.getNets() 527 | return ord_tech, ord_design, timing, db, chip, block, nets 528 | 529 | 530 | def iterate_nets_get_properties(ord_design, timing, nets, block, cell_dict, cell_name_dict): 531 | #This must eventually be put into a create graph function. 532 | #source and destination instances for the graph function. 533 | srcs = [] 534 | dsts = [] 535 | #Dictionary that stores all the properties of the instances. 536 | inst_dict = {} 537 | #Dictionary that keeps a stores the fanin and fanout of the instances in an easily indexable way. 538 | fanin_dict = {} 539 | fanout_dict = {} 540 | #storing all the endpoints(here they are flipflops) 541 | endpoints = [] 542 | for net in nets: 543 | iterms = net.getITerms() 544 | net_srcs = [] 545 | net_dsts = [] 546 | # create/update the instance dictionary for each net. 547 | for s_iterm in iterms: 548 | inst = s_iterm.getInst() 549 | inst_name = s_iterm.getInst().getName() 550 | term_name = s_iterm.getInst().getName() + "/" + s_iterm.getMTerm().getName() 551 | cell_type = s_iterm.getInst().getMaster().getName() 552 | 553 | if inst_name not in inst_dict: 554 | i_inst = block.findInst(inst_name) 555 | m_inst = i_inst.getMaster() 556 | area = m_inst.getWidth() * m_inst.getHeight() 557 | inst_dict[inst_name] = { 558 | 'idx':len(inst_dict), 559 | 'cell_type_name':cell_type, 560 | 'cell_type':get_type(cell_type, cell_dict, cell_name_dict), 561 | 'slack':0, 562 | 'slew':0, 563 | 'load':0, 564 | 'cin':0, 565 | 'area': area} 566 | if s_iterm.isInputSignal(): 567 | net_dsts.append((inst_dict[inst_name]['idx'],term_name)) 568 | if inst_dict[inst_name]['cell_type'][0] == 16: # check for flipflops 569 | endpoints.append(inst_dict[inst_name]['idx']) 570 | elif s_iterm.isOutputSignal(): 571 | net_srcs.append((inst_dict[inst_name]['idx'],term_name)) 572 | (inst_dict[inst_name]['slack'], 573 | inst_dict[inst_name]['slew'], 574 | 
inst_dict[inst_name]['load'])= pin_properties(s_iterm, CLKset, ord_design, timing) 575 | else: 576 | print("Should not be here") 577 | # list the connections for the graph creation step and the fainin/fanout dictionaries 578 | for src,src_term in net_srcs: 579 | for dst,dst_term in net_dsts: 580 | srcs.append(src) 581 | dsts.append(dst) 582 | src_key = list(inst_dict.keys())[src] 583 | dst_key = list(inst_dict.keys())[dst] 584 | if src_key in fanout_dict.keys(): 585 | fanout_dict[src_key].append(dst_key) 586 | else: 587 | fanout_dict[src_key] = [dst_key] 588 | if dst_key in fanin_dict.keys(): 589 | fanin_dict[dst_key].append(src_key) 590 | else: 591 | fanin_dict[dst_key] = [src_key] 592 | return inst_dict, endpoints, srcs, dsts, fanin_dict, fanout_dict 593 | 594 | 595 | -------------------------------------------------------------------------------- /session2/demo3_LPG_query_example.py: -------------------------------------------------------------------------------- 1 | #BSD 3-Clause License 2 | # 3 | #Copyright (c) 2023, ASU-VDA-Lab 4 | # 5 | #Redistribution and use in source and binary forms, with or without 6 | #modification, are permitted provided that the following conditions are met: 7 | # 8 | #1. Redistributions of source code must retain the above copyright notice, this 9 | # list of conditions and the following disclaimer. 10 | # 11 | #2. Redistributions in binary form must reproduce the above copyright notice, 12 | # this list of conditions and the following disclaimer in the documentation 13 | # and/or other materials provided with the distribution. 14 | # 15 | #3. Neither the name of the copyright holder nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 
18 | # 19 | #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | #CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | import graph_tool as gt 30 | import pickle 31 | import numpy as np 32 | import pandas as pd 33 | import sys 34 | import argparse 35 | from pathlib import Path 36 | 37 | ############################################################################################# 38 | #LPG dataframe properties: # 39 | #('id' maps to the node id in LPG) # 40 | #pin_df: 'name', 'x', 'y', 'is_in_clk', 'is_port', 'is_start', 'is_end', 'dir', # 41 | # 'maxcap', 'maxtran', 'num_reachable_endpoint', 'cellname', 'netname', # 42 | # 'tran', 'slack', 'risearr', 'fallarr', 'cap', 'is_macro', 'is_seq', # 43 | # 'is_buf', 'is_inv', 'new_cellname', 'new_netname', 'id' # 44 | # # 45 | #cell_df: 'name', 'is_seq', 'is_macro', 'is_in_clk', 'x0', 'y0', 'x1', 'y1', # 46 | # 'is_buf', 'is_inv', 'ref', 'staticpower', 'dynamicpower', 'x', 'y', # 47 | # 'new_cellname', 'id' # 48 | # # 49 | #net_df: 'name', 'net_route_length', 'net_steiner_length', 'fanout', 'total_cap', # 50 | # 'net_cap', 'net_coupling', 'net_res', 'id' # 51 | # # 52 | #fo4_df: 'ref', 'func_id', 'libcell_area', 'worst_input_cap', 'libcell_leakage', # 53 | # 'fo4_delay', 'libcell_delay_fixed_load', 'libcell_id' # 54 | # # 55 | 
#pin_pin_df: 'src', 'tar', 'src_type', 'tar_type', 'is_net', 'arc_delay', 'src_id', 'tar_id'#
#cell_pin_df: 'src', 'tar', 'src_type', 'tar_type', 'src_id', 'tar_id'                      #
#net_pin_df: 'src', 'tar', 'src_type', 'tar_type', 'src_id', 'tar_id'                       #
#net_cell_df: 'src', 'tar', 'src_type', 'tar_type', 'src_id', 'tar_id'                      #
#cell_cell_df: 'src', 'tar', 'src_type', 'tar_type', 'src_id', 'tar_id'                     #
#edge_df: 'src_id', 'tar_id', 'e_type'                                                      #
#         (etype: 0-p_p, 1-c_p, 2-n_p, 3-n_c, 4-c_c)                                        #
#############################################################################################

if __name__ == "__main__":
    #################################################
    #parse args and import functions from CircuitOps#
    #################################################
    parser = argparse.ArgumentParser(description='path of your CircuitOps clone and the file of generate_LPG_from_tables.py')
    parser.add_argument('--path_IR', type = Path, default='./CircuitOps/IRs/nangate45/gcd/', action = 'store')
    parser.add_argument('--path_CircuitOps', type = Path, default='./CircuitOps/', action = 'store')
    parser.add_argument('--use_pd', default = False, action = 'store_true')
    parser.add_argument('--path_LPG_gen_func', type = Path, default='./CircuitOps/src/python/', action = 'store')
    pyargs = parser.parse_args()

    # The LPG builder lives in the CircuitOps submodule, so it is imported
    # dynamically from the user-supplied path.
    sys.path.append(str(pyargs.path_LPG_gen_func))
    from generate_LPG_from_tables import generate_LPG_from_tables

    ######################
    #get feature from LPG#
    ######################
    # Either rebuild the LPG from pre-generated IR tables (default) or, with
    # --use_pd, regenerate the tables through the CircuitOps Python API.
    LPG, pin_df, cell_df, net_df, fo4_df, pin_pin_df, cell_pin_df, \
    net_pin_df, net_cell_df, cell_cell_df, edge_df, v_type, e_type \
    = generate_LPG_from_tables(data_root = f"{pyargs.path_IR}/") if not pyargs.use_pd else \
    generate_LPG_from_tables(data_root = f"{pyargs.path_CircuitOps}/", use_python_api = pyargs.use_pd, write_table = False)

    sys.path.remove(str(pyargs.path_LPG_gen_func))
    ### get dimensions
    N_pin, _ = pin_df.shape
    N_cell, _ = cell_df.shape
    N_net, _ = net_df.shape
    total_v_cnt = N_pin+N_cell+N_net

    N_pin_pin, _ = pin_pin_df.shape
    N_cell_pin, _ = cell_pin_df.shape
    N_net_pin, _ = net_pin_df.shape
    N_net_cell, _ = net_cell_df.shape
    N_cell_cell, _ = cell_cell_df.shape
    total_e_cnt = N_pin_pin + N_cell_pin + N_net_pin + N_net_cell + N_cell_cell

    # Vertices in the LPG are laid out as [pins | cells | nets], so each
    # property is written into its own index range; string-typed properties
    # are not supported by graph-tool arrays.
    LPG_pin_slack = LPG.new_vp("float")
    LPG_pin_slack.a[0:N_pin] = pin_df["slack"].to_numpy()

    LPG_pin_risearr = LPG.new_vp("float")
    LPG_pin_risearr.a[0:N_pin] = pin_df["risearr"].to_numpy()

    LPG_pin_cap = LPG.new_vp("float")
    LPG_pin_cap.a[0:N_pin] = pin_df["cap"].to_numpy()

    LPG_cell_is_seq = LPG.new_vp("bool")
    LPG_cell_is_seq.a[N_pin:N_pin+N_cell] = cell_df["is_seq"].to_numpy()

    LPG_net_net_route_length = LPG.new_vp("float")
    LPG_net_net_route_length.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_route_length"].to_numpy()

    # v_props columns: node index, pin_slack, pin_rise_arrival_time, pin_cap,
    # cell_is_seq, net_route_length (each property is only meaningful in its
    # own vertex range; the others read as zeros there).
    v_props = LPG.get_vertices(vprops = [LPG_pin_slack, LPG_pin_risearr, LPG_pin_cap, LPG_cell_is_seq, LPG_net_net_route_length])
    print("Index | slack | rise_arrival | cap | is_seq | route_length")
    # Print only the first ten vertices as a smoke-test of the queried data.
    for v_row in v_props[0:10,:]:
        print(f"{int(v_row[0]):5d} | {v_row[1]:7.4e} | {v_row[2]:12.4e} | {v_row[3]:11.4e} | {bool(v_row[4])!s:6} | {v_row[5]:7.4e}")

-------------------------------------------------------------------------------- /session2/demo4_preroute_net_delay_prediction.py: --------------------------------------------------------------------------------
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from graph_tool.all import *
from numpy.random import *
import time
import graph_tool as gt
import sys
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import matplotlib.pyplot as plt
from demo4_preroute_net_delay_prediction_helpers import *

##################
# read IR tables #
##################
pin_df, cell_df, net_df, pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df, fo4_df = \
    read_tables_OpenROAD("./CircuitOps/IRs/nangate45/gcd/")

######################
# rename dfs columns #
######################
# Normalize CircuitOps IR column names to the short names used below.
pin_df = pin_df.rename(columns={ \
    "pin_name":"name", "cell_name":"cellname", "net_name":"netname", \
    "pin_tran":"tran", "pin_slack":"slack", "pin_rise_arr":"risearr", \
    "pin_fall_arr":"fallarr", "input_pin_cap":"cap", "is_startpoint":"is_start", \
    "is_endpoint":"is_end"})
cell_df = cell_df.rename(columns={ \
    "cell_name":"name", "libcell_name":"ref", "cell_static_power":"staticpower", \
    "cell_dynamic_power":"dynamicpower"})
net_df = net_df.rename(columns={"net_name":"name"})
fo4_df = fo4_df.rename(columns={"libcell_name":"ref"})

##########################################################
# add is_macro, is_seq to pin_df, change pin_dir to bool #
##########################################################
cell_type_df = cell_df.loc[:,["name", "is_macro", "is_seq"]]
cell_type_df = cell_type_df.rename(columns={"name":"cellname"})
pin_df = pin_df.merge(cell_type_df, on="cellname", how="left")
# Port pins have no owning cell and come out of the merge as NaN.
pin_df["is_macro"] = pin_df["is_macro"].fillna(False)
pin_df["is_seq"] = pin_df["is_seq"].fillna(False)
# After this, dir == True means dir was 0 (output/driver in this encoding).
pin_df["dir"] = (pin_df["dir"] == 0)
fo4_df["libcell_id"] = range(fo4_df.shape[0])

### get cell center loc
cell_df["x"] = 0.5*(cell_df.x0 + cell_df.x1)
cell_df["y"] = 0.5*(cell_df.y0 + cell_df.y1)

### add is_buf is_inv to pin_df
cell_type_df = cell_df.loc[:,["name", "is_buf", "is_inv"]]
cell_type_df = cell_type_df.rename(columns={"name":"cellname"})
pin_df = pin_df.merge(cell_type_df, on="cellname", how="left")
pin_df["is_buf"] = pin_df["is_buf"].fillna(False)
pin_df["is_inv"] = pin_df["is_inv"].fillna(False)

### rename cells and nets
cell_df, pin_df = rename_cells(cell_df, pin_df)
net_df, pin_df = rename_nets(net_df, pin_df)

### get dimensions
# Vertex ids are assigned contiguously as [pins | cells | nets]; every
# range-sliced property write below relies on this layout.
N_pin, _ = pin_df.shape
N_cell, _ = cell_df.shape
N_net, _ = net_df.shape
total_v_cnt = N_pin+N_cell+N_net
pin_df['id'] = range(N_pin)
cell_df['id'] = range(N_pin, N_pin+N_cell)
net_df['id'] = range(N_pin+N_cell, total_v_cnt)

### generate edge_df
pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df, edge_df = \
    generate_edge_df_OpenROAD(pin_df, cell_df, net_df, pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df)

### get edge dimensions
N_pin_pin, _ = pin_pin_df.shape
N_cell_pin, _ = cell_pin_df.shape
N_net_pin, _ = net_pin_df.shape
N_net_cell, _ = net_cell_df.shape
N_cell_cell, _ = cell_cell_df.shape
total_e_cnt = N_pin_pin + N_cell_pin + N_net_pin + N_net_cell + N_cell_cell

# Edges were concatenated in the order below, so edge type can be assigned by
# contiguous index ranges.
edge_df["e_type"] = 0 # pin_pin
# edge_df.loc[0:N_pin_edge,["is_net"]] = pin_edge_df.loc[:, "is_net"]
edge_df.loc[N_pin_pin : N_pin_pin+N_cell_pin, ["e_type"]] = 1 # cell_pin
edge_df.loc[N_pin_pin+N_cell_pin : N_pin_pin+N_cell_pin+N_net_pin, ["e_type"]] = 2 # net_pin
edge_df.loc[N_pin_pin+N_cell_pin+N_net_pin : N_pin_pin+N_cell_pin+N_net_pin+N_net_cell, ["e_type"]] = 3 # net_cell
edge_df.loc[N_pin_pin+N_cell_pin+N_net_pin+N_net_cell : N_pin_pin+N_cell_pin+N_net_pin+N_net_cell+N_cell_cell, ["e_type"]] = 4 # cell_cell

############
#create LPG#
############
### generate graph
g = Graph()
g.add_vertex(total_v_cnt)
v_type = g.new_vp("int")
v_type.a[0:N_pin] = 0 # pin
v_type.a[N_pin:N_pin+N_cell] = 1 # cell
v_type.a[N_pin+N_cell:total_v_cnt] = 2 # net

### add edge to graph
e_type = g.new_ep("int")

print("num of nodes, num of edges: ", g.num_vertices(), g.num_edges())
g.add_edge_list(edge_df.values.tolist(), eprops=[e_type])
print("num of nodes, num of edges: ", g.num_vertices(), g.num_edges())

### processing fo4 table
# group_id groups libcells that implement the same logic function.
fo4_df["group_id"] = pd.factorize(fo4_df.func_id)[0] + 1
fo4_df["libcell_id"] = range(fo4_df.shape[0])
libcell_np = fo4_df.to_numpy()

### assign cell size class
# Two size classifications per function group: size_class ranks cells by
# fixed-load delay (descending), size_class2 buckets them into class_cnt
# equal-width delay bins.
fo4_df["size_class"] = 0
fo4_df["size_class2"] = 0
fo4_df["size_cnt"] = 0
class_cnt = 50
for i in range(fo4_df.group_id.min(), fo4_df.group_id.max()+1):
    temp = fo4_df.loc[fo4_df.group_id==i, ["group_id", "fix_load_delay"]]
    temp = temp.sort_values(by=['fix_load_delay'], ascending=False)
    fo4_df.loc[temp.index, ["size_class"]] = range(len(temp))
    fo4_df.loc[temp.index, ["size_cnt"]] = len(temp)

    temp["size_cnt"] = 0
    MIN = temp.fix_load_delay.min()
    MAX = temp.fix_load_delay.max()
    interval = (MAX-MIN)/class_cnt
    for j in range(1, class_cnt):
        delay_h = MAX - j*interval
        delay_l = MAX - (j+1)*interval
        if j == (class_cnt-1):
            delay_l = MIN
        temp.loc[(temp.fix_load_delay < delay_h) & (temp.fix_load_delay >= delay_l), ["size_cnt"]] = j
    fo4_df.loc[temp.index, ["size_class2"]] = temp["size_cnt"]

cell_fo4 = fo4_df.loc[:,["ref", "fo4_delay", "fix_load_delay", "group_id", "libcell_id", "size_class", "size_class2", "size_cnt"]]
cell_df = cell_df.merge(cell_fo4, on="ref", how="left")
cell_df["libcell_id"] = cell_df["libcell_id"].fillna(-1)

### add node and edge ids
# NOTE(review): v_id is created with new_ep (edge property) although the
# comment says "node ids" -- possibly meant new_vp; confirm before relying
# on it as a vertex property.
v_id = g.new_ep("int")
v_id.a = range(v_id.a.shape[0])

e_id = g.new_ep("int")
e_id.a = range(e_id.a.shape[0])

### add pin properties to LPG ###
v_x = g.new_vp("float")
v_y = g.new_vp("float")
v_is_in_clk = g.new_vp("bool")
v_is_port = g.new_vp("bool")
v_is_start = g.new_vp("bool")
v_is_end = g.new_vp("bool")
v_dir = g.new_vp("bool")
v_maxcap = g.new_vp("float")
v_maxtran = g.new_vp("float")
v_num_reachable_endpoint = g.new_vp("int")
v_tran = g.new_vp("float")
v_slack = g.new_vp("float")
v_risearr = g.new_vp("float")
v_fallarr = g.new_vp("float")
v_cap = g.new_vp("float")
v_is_macro = g.new_vp("bool")
v_is_seq = g.new_vp("bool")
v_is_buf = g.new_vp("bool")
v_is_inv = g.new_vp("bool")


v_x.a[0:N_pin] = pin_df["x"].to_numpy()
v_y.a[0:N_pin] = pin_df["y"].to_numpy()
v_is_in_clk.a[0:N_pin] = pin_df["is_in_clk"].to_numpy()
v_is_port.a[0:N_pin] = pin_df["is_port"].to_numpy()
v_is_start.a[0:N_pin] = pin_df["is_start"].to_numpy()
v_is_end.a[0:N_pin] = pin_df["is_end"].to_numpy()
v_dir.a[0:N_pin] = pin_df["dir"].to_numpy()
v_maxcap.a[0:N_pin] = pin_df["maxcap"].to_numpy()
v_maxtran.a[0:N_pin] = pin_df["maxtran"].to_numpy()
v_num_reachable_endpoint.a[0:N_pin] = pin_df["num_reachable_endpoint"].to_numpy()
v_tran.a[0:N_pin] = pin_df["tran"].to_numpy()
v_slack.a[0:N_pin] = pin_df["slack"].to_numpy()
v_risearr.a[0:N_pin] = pin_df["risearr"].to_numpy()
v_fallarr.a[0:N_pin] = pin_df["fallarr"].to_numpy()
v_cap.a[0:N_pin] = pin_df["cap"].to_numpy()
v_is_macro.a[0:N_pin] = pin_df["is_macro"].to_numpy()
v_is_seq.a[0:N_pin] = pin_df["is_seq"].to_numpy()
v_is_buf.a[0:N_pin] = pin_df["is_buf"].to_numpy()
v_is_inv.a[0:N_pin] = pin_df["is_inv"].to_numpy()

### add cell properties to LPG ###
v_x0 = g.new_vp("float")
v_y0 = g.new_vp("float")
v_x1 = g.new_vp("float")
v_y1 = g.new_vp("float")
v_staticpower = g.new_vp("float")
v_dynamicpower = g.new_vp("float")

v_fo4_delay = g.new_vp("float")
v_fix_load_delay = g.new_vp("float")
# NOTE(review): the five properties below are created with new_ep (edge
# properties) but written with vertex index ranges -- this only works while
# the edge count exceeds N_pin+N_cell; probably meant new_vp. Confirm.
v_group_id = g.new_ep("int")
v_libcell_id = g.new_ep("int")
v_size_class = g.new_ep("int")
v_size_class2 = g.new_ep("int")
v_size_cnt = g.new_ep("int")

v_is_seq.a[N_pin:N_pin+N_cell] = cell_df["is_seq"].to_numpy()
v_is_macro.a[N_pin:N_pin+N_cell] = cell_df["is_macro"].to_numpy()
v_is_in_clk.a[N_pin:N_pin+N_cell] = cell_df["is_in_clk"].to_numpy()
v_x0.a[N_pin:N_pin+N_cell] = cell_df["x0"].to_numpy()
v_y0.a[N_pin:N_pin+N_cell] = cell_df["y0"].to_numpy()
v_x1.a[N_pin:N_pin+N_cell] = cell_df["x1"].to_numpy()
v_y1.a[N_pin:N_pin+N_cell] = cell_df["y1"].to_numpy()
v_is_buf.a[N_pin:N_pin+N_cell] = cell_df["is_buf"].to_numpy()
v_is_inv.a[N_pin:N_pin+N_cell] = cell_df["is_inv"].to_numpy()
v_staticpower.a[N_pin:N_pin+N_cell] = cell_df["staticpower"].to_numpy()
v_dynamicpower.a[N_pin:N_pin+N_cell] = cell_df["dynamicpower"].to_numpy()
v_x.a[N_pin:N_pin+N_cell] = cell_df["x"].to_numpy()
v_y.a[N_pin:N_pin+N_cell] = cell_df["y"].to_numpy()

v_fo4_delay.a[N_pin:N_pin+N_cell] = cell_df["fo4_delay"].to_numpy()
v_fix_load_delay.a[N_pin:N_pin+N_cell] = cell_df["fix_load_delay"].to_numpy()
v_group_id.a[N_pin:N_pin+N_cell] = cell_df["group_id"].to_numpy()
v_libcell_id.a[N_pin:N_pin+N_cell] = cell_df["libcell_id"].to_numpy()
v_size_class.a[N_pin:N_pin+N_cell] = cell_df["size_class"].to_numpy()
v_size_class2.a[N_pin:N_pin+N_cell] = cell_df["size_class2"].to_numpy()
v_size_cnt.a[N_pin:N_pin+N_cell] = cell_df["size_cnt"].to_numpy()

### add net properties to LPG ###
v_net_route_length = g.new_vp("float")
v_net_steiner_length = g.new_vp("float")
v_fanout = g.new_vp("int")
v_total_cap = g.new_vp("float")
v_net_cap = g.new_vp("float")
v_net_coupling = g.new_vp("float")
v_net_res = g.new_vp("float")

v_net_route_length.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_route_length"].to_numpy()
v_net_steiner_length.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_steiner_length"].to_numpy()
v_fanout.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["fanout"].to_numpy()
v_total_cap.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["total_cap"].to_numpy()
v_net_cap.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_cap"].to_numpy()
v_net_coupling.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_coupling"].to_numpy()
v_net_res.a[N_pin+N_cell:N_pin+N_cell+N_net] = net_df["net_res"].to_numpy()

### add cell id to pin_df
cell_temp = cell_df.loc[:, ["name", "id"]]
cell_temp = cell_temp.rename(columns={"name":"cellname", "id":"cell_id"})
pin_df = pin_df.merge(cell_temp, on="cellname", how="left")
# Pins with no owning cell (ports) fall back to their own pin id.
idx = pin_df[pd.isna(pin_df.cell_id)].index
pin_df.loc[idx, ["cell_id"]] = pin_df.loc[idx, ["id"]].to_numpy()

pin_cellid = pin_df.cell_id.to_numpy()
# pin_isseq = v_is_seq.a[0:N_pin]
pin_ismacro = v_is_macro.a[0:N_pin]
# mask = (pin_isseq==True)| (pin_ismacro==True)
mask = pin_ismacro==True
pin_cellid[mask] = pin_df[mask].id ### for pins in macro and seq, pin_cellid = pin id

### add net id to pin_df
net_temp = net_df.loc[:, ["name", "id"]]
net_temp = net_temp.rename(columns={"name":"netname", "id":"net_id"})
pin_df = pin_df.merge(net_temp, on="netname", how="left")

### generate pin-pin graph ###
# View restricted to pin vertices and pin->pin edges only.
g_pin = GraphView(g, vfilt=(v_type.a==0), efilt=e_type.a==0)
print("pin graph: num of nodes, num of edges: ", g_pin.num_vertices(), g_pin.num_edges())

### threshold to remove small components in the netlist
cell_cnt_th = 200

### get the large components
comp, hist = label_components(g_pin, directed=False)
comp.a[N_pin:] = -1
labels = get_large_components(hist, th=cell_cnt_th)
v_valid_pins = g_pin.new_vp("bool")
for l in labels:
    v_valid_pins.a[comp.a==l] = True

### get subgraphs
# Keep only the edges whose both endpoints are valid pins.
e_label = g_pin.new_ep("bool")
e_label.a = False
e_ar = g_pin.get_edges(eprops=[e_id])
v_ar = g.get_vertices(vprops=[v_is_buf, v_is_inv, v_valid_pins])
src = e_ar[:,0]
tar = e_ar[:,1]
idx = e_ar[:,2]
mask = (v_ar[src, -1] == True) & (v_ar[tar, -1] == True)
e_label.a[idx[mask]] = True
u = get_subgraph(g_pin, v_valid_pins, e_label)

### mark selected pins ###
pin_df["selected"] = v_valid_pins.a[0:N_pin]
###

### get buffer tree start and end points
# A tree START drives a buffer/inverter but is not one itself; a tree END is
# a non-buf/inv pin fed by a buffer, inverter, or a start pin.
v_bt_s = g.new_vp("bool")
v_bt_e = g.new_vp("bool")
v_bt_s.a = False
v_bt_e.a = False

e_ar = u.get_edges()
v_ar = g.get_vertices(vprops=[v_is_buf, v_is_inv])
src = e_ar[:,0]
tar = e_ar[:,1]
src_isbuf = v_ar[src,1]
src_isinv = v_ar[src,2]
tar_isbuf = v_ar[tar,1]
tar_isinv = v_ar[tar,2]
is_s = (tar_isbuf | tar_isinv ) & np.logical_not(src_isbuf) & np.logical_not(src_isinv)
v_bt_s.a[src[is_s==1]] = True

src_iss = v_bt_s.a[src]==True
is_e = (src_isbuf | src_isinv | src_iss) & np.logical_not(tar_isbuf) & np.logical_not(tar_isinv)
v_bt_e.a[tar[is_e==1]] = True

### get buf tree start pin id ###
v_net_id = g.new_vp("int")
v_net_id.a[0:N_pin] = pin_df.net_id.to_numpy()
mask = v_bt_s.a < 1
v_net_id.a[mask] = 0

### mark buffer trees
# BFS from each tree start: every pin reached through buffers/inverters gets
# the start's tree id and net id; polarity flips at inverter outputs.
v_tree_id = g.new_vp("int")
v_tree_id.a = 0
v_polarity = g.new_vp("bool")
v_polarity.a = True
e_tree_id = g.new_ep("int")
e_tree_id.a = 0

tree_end_list = []
buf_list = []

v_all = g.get_vertices()
l = np.array(list(range(1, int(v_bt_s.a.sum())+1)))
v_tree_id.a[v_bt_s.a>0] = l
loc = v_all[v_bt_s.a>0]
out_v_list = []
# First BFS level: expand from every tree start pin.
for i in loc:
    out_e = u.get_out_edges(i, eprops=[e_id])
    out_v = out_e[:,1]
    v_tree_cnt = v_tree_id[i]
    net_id = v_net_id[i]
    e_tree_id.a[out_e[:,-1]] = v_tree_cnt
    v_tree_id.a[out_v] = v_tree_cnt
    v_net_id.a[out_v] = net_id
    tree_end_list.append(out_v[(v_is_buf.a[out_v]==False) & (v_is_inv.a[out_v]==False)])
    out_v = out_v[(v_is_buf.a[out_v]==True) | (v_is_inv.a[out_v]==True)]
    buf_list.append(out_v)
    out_v_list.append(out_v)
new_v = np.concatenate(out_v_list, axis=0)
N, = new_v.shape
# Subsequent BFS levels: buffer pins propagate unchanged; inverter *output*
# pins (v_dir true) flip the polarity for their fanout.
while N > 0:
    out_v_list = []
    for i in new_v:
        if v_is_buf[i]:
            out_e = u.get_out_edges(i, eprops=[e_id])
            out_v = out_e[:,1]
            v_tree_cnt = v_tree_id[i]
            net_id = v_net_id[i]
            v_p = v_polarity.a[i]
            e_tree_id.a[out_e[:,-1]] = v_tree_cnt
            v_tree_id.a[out_v] = v_tree_cnt
            v_net_id.a[out_v] = net_id
            v_polarity.a[out_v] = v_p
            tree_end_list.append(out_v[(v_is_buf.a[out_v]==False) & (v_is_inv.a[out_v]==False)])
            out_v = out_v[(v_is_buf.a[out_v]==True) | (v_is_inv.a[out_v]==True)]
            buf_list.append(out_v)
            out_v_list.append(out_v)
        else:
            out_e = u.get_out_edges(i, eprops=[e_id])
            out_v = out_e[:,1]
            v_tree_cnt = v_tree_id[i]
            net_id = v_net_id[i]
            v_p = v_polarity.a[i]
            e_tree_id.a[out_e[:,-1]] = v_tree_cnt
            v_tree_id.a[out_v] = v_tree_cnt
            v_net_id.a[out_v] = net_id
            if v_dir[i]:
                v_polarity.a[out_v] = not v_p
            else:
                v_polarity.a[out_v] = v_p
            ###
            tree_end_list.append(out_v[(v_is_buf.a[out_v]==False) & (v_is_inv.a[out_v]==False)])
            ###
            out_v = out_v[(v_is_buf.a[out_v]==True) | (v_is_inv.a[out_v]==True)]
            ###
            buf_list.append(out_v)
            ###
            out_v_list.append(out_v)
    new_v = np.concatenate(out_v_list, axis=0)
    N, = new_v.shape

### get actual number of BT end pin cnt
tree_end_list_new = np.concatenate(tree_end_list, axis=0)
N_bt_e = tree_end_list_new.shape[0]
v_bt_e = g.new_vp("bool")
v_bt_e.a = False
v_bt_e.a[tree_end_list_new] = True

# net_id_rm_bt: net id with buffer trees collapsed -- tree end pins take the
# id of the net driving the tree start.
pin_df["net_id_rm_bt"] = pin_df["net_id"]
pin_df.loc[tree_end_list_new, ["net_id_rm_bt"]] = v_net_id.a[tree_end_list_new]

############################################
#Gathering dataset for training and testing#
############################################

### get selected pins ###
selected_pin_df = pin_df[(pin_df.selected == True) & (pin_df.is_buf == False) & (pin_df.is_inv == False)]

### get driver pins and related properties ###
driver_pin = selected_pin_df[selected_pin_df.dir==0]
driver_pin_info = driver_pin.loc[:, ["id", "net_id", "x", "y", "cell_id", "risearr", "fallarr"]]
driver_pin_info = driver_pin_info.rename(columns={"id":"driver_pin_id", "x":"driver_x", "y":"driver_y", "cell_id":"driver_id", "risearr":"driver_risearr", "fallarr":"driver_fallarr"})
cell_info = cell_df.loc[:, ["id", "libcell_id", "fo4_delay", "fix_load_delay"]]
# NOTE(review): the "y":"driver_y" mapping below is a no-op (cell_info has no
# "y" column); only the "id" rename takes effect.
cell_info = cell_info.rename(columns={"id":"driver_id", "y":"driver_y"})
driver_pin_info = driver_pin_info.merge(cell_info, on="driver_id", how="left")

### get sink pins and related properties ###
sink_pin = selected_pin_df[selected_pin_df.dir==1]
sink_pin_info = sink_pin.loc[:, ["id", "x", "y", "cap", "net_id", "cell_id", "risearr", "fallarr"]]
sink_pin_info = sink_pin_info.merge(driver_pin_info, on="net_id", how="left")

# Sink coordinates become offsets relative to their driver; sinks without a
# matched driver are dropped.
sink_pin_info.x = sink_pin_info.x - sink_pin_info.driver_x
sink_pin_info.y = sink_pin_info.y - sink_pin_info.driver_y
idx = sink_pin_info[pd.isna(sink_pin_info.driver_x)].index
sink_pin_info = sink_pin_info.drop(idx)

### get context sink locations ###
sink_loc = sink_pin_info.groupby('net_id', as_index=False).agg({'x': ['mean', 'min', 'max', 'std'], 'y': ['mean', 'min', 'max', 'std'], 'cap': ['sum']})
sink_loc.columns = ['_'.join(col).rstrip('_') for col in sink_loc.columns.values]
# Single-sink nets produce NaN std -- treat as zero spread.
sink_loc['x_std'] = sink_loc['x_std'].fillna(0)
sink_loc['y_std'] = sink_loc['y_std'].fillna(0)

### merge information and rename ###
sink_pin_info = sink_pin_info.merge(sink_loc, on="net_id", how="left")
sink_pin_info = sink_pin_info.rename(columns={"libcell_id":"driver_libcell_id", "fo4_delay":"driver_fo4_delay", "fix_load_delay":"driver_fix_load_delay", \
    "x_mean": "context_x_mean", "x_min": "context_x_min", "x_max": "context_x_max", "x_std": "context_x_std", \
    "y_mean": "context_y_mean", "y_min": "context_y_min", "y_max": "context_y_max", "y_std": "context_y_std", \
    "risearr":"sink_risearr", "fallarr":"sink_fallarr"})
# Worst-case (earliest) arrival per pin is the min of rise/fall arrivals.
sink_pin_info["sink_arr"] = sink_pin_info[["sink_risearr", "sink_fallarr"]].min(axis=1)
sink_pin_info["driver_arr"] = sink_pin_info[["driver_risearr", "driver_fallarr"]].min(axis=1)

### get cell arc delays ###
cell_arc = pin_pin_df.groupby('tar_id',
as_index=False).agg({'arc_delay': ['mean', 'min', 'max']}) 456 | cell_arc.columns = ['_'.join(col).rstrip('_') for col in cell_arc.columns.values] 457 | cell_arc = cell_arc.rename(columns={"tar_id":"driver_pin_id"}) 458 | sink_pin_info = sink_pin_info.astype({"driver_pin_id":"int"}) 459 | sink_pin_info = sink_pin_info.merge(cell_arc, on="driver_pin_id", how="left") 460 | idx = sink_pin_info[pd.isna(sink_pin_info.arc_delay_mean)].index 461 | sink_pin_info = sink_pin_info.drop(idx) 462 | 463 | ### get net delay ### 464 | cell_arc = cell_arc.rename(columns={"driver_pin_id":"id", "arc_delay_mean":"net_delay_mean", "arc_delay_min":"net_delay_min", "arc_delay_max":"net_delay_max"}) 465 | sink_pin_info = sink_pin_info.merge(cell_arc, on="id", how="left") 466 | 467 | ### stage delay = driver cell arc delay + net delay ### 468 | sink_pin_info["stage_delay"] = sink_pin_info.arc_delay_max + sink_pin_info.net_delay_max 469 | 470 | print("Reference data frame") 471 | print(sink_pin_info) 472 | 473 | # x, y: distance between driver and the target sink 474 | # cap, cap_sum: sink capacitance 475 | # driver_fo4_delay driver_fix_load_delay: driving strength of the driver cell 476 | # context_x_mean", context_x_min, context_x_max, context_x_std, context_y_mean, context_y_min, context_y_max, context_y_std: Context sink locations 477 | features = sink_pin_info.loc[:, ["x", "y", "cap", "cap_sum", "driver_fo4_delay", "driver_fix_load_delay", \ 478 | "context_x_mean", "context_x_min", "context_x_max", "context_x_std", \ 479 | "context_y_mean", "context_y_min", "context_y_max", "context_y_std"]].to_numpy().astype(float) 480 | labels = sink_pin_info.loc[:, ["stage_delay"]].to_numpy().astype(float) 481 | 482 | features = preprocessing.normalize(features, axis=0) 483 | labels = preprocessing.normalize(labels, axis=0) 484 | labels = labels.reshape([-1,]) 485 | 486 | nb_samples = features.shape[0] 487 | nb_feat = features.shape[1] 488 | 489 | train_x, test_x, train_y, test_y = 
train_test_split(features, labels, test_size=0.05) 490 | 491 | nb_train_samples = train_x.shape[0] 492 | nb_test_samples = train_y.shape[0] 493 | 494 | print("Training Machine Learning Model") 495 | 496 | nb_estim = 500 497 | max_feat = 0.5 498 | model = RandomForestRegressor(n_estimators=nb_estim, max_features=max_feat) 499 | model.fit(train_x, train_y) 500 | 501 | pred = model.predict(train_x) 502 | 503 | plt.figure() 504 | plt.scatter(pred, train_y, label = "Training") 505 | 506 | pred = model.predict(test_x) 507 | plt.scatter(pred, test_y, label = "Testing") 508 | 509 | data_range = np.arange(min(np.min(train_y), np.min(test_y)), 510 | max(np.max(train_y), np.max(test_y)), 511 | 0.005 512 | ) 513 | plt.plot(data_range, data_range, label="Reference") 514 | 515 | plt.title("Accuracy on trianing data and testing data") 516 | 517 | plt.legend() 518 | plt.xlabel("Reference") 519 | plt.ylabel("Predicted") 520 | plt.show() 521 | 522 | -------------------------------------------------------------------------------- /session2/demo4_preroute_net_delay_prediction_helpers.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | import pandas as pd 16 | import numpy as np 17 | from graph_tool.all import * 18 | from numpy.random import * 19 | import time 20 | import graph_tool as gt 21 | import sys 22 | from sklearn.ensemble import RandomForestRegressor 23 | from sklearn.model_selection import train_test_split 24 | from sklearn import preprocessing 25 | import matplotlib.pyplot as plt 26 | 27 | #### helper functions #### 28 | def read_tables_OpenROAD(data_root, design=None): 29 | cell_cell_path = data_root + "cell_cell_edge.csv" 30 | cell_pin_path = data_root + "cell_pin_edge.csv" 31 | cell_path = data_root + "cell_properties.csv" 32 | net_pin_path = data_root + "net_pin_edge.csv" 33 | net_path = data_root + "net_properties.csv" 34 | pin_pin_path = data_root + "pin_pin_edge.csv" 35 | pin_path = data_root + "pin_properties.csv" 36 | net_cell_path = data_root + "cell_net_edge.csv" 37 | all_fo4_delay_path = data_root + "libcell_properties.csv" 38 | 39 | ### load tables 40 | fo4_df = pd.read_csv(all_fo4_delay_path) 41 | pin_df = pd.read_csv(pin_path) 42 | cell_df = pd.read_csv(cell_path) 43 | net_df = pd.read_csv(net_path) 44 | cell_cell_df = pd.read_csv(cell_cell_path) 45 | pin_pin_df = pd.read_csv(pin_pin_path) 46 | cell_pin_df = pd.read_csv(cell_pin_path) 47 | net_pin_df = pd.read_csv(net_pin_path) 48 | net_cell_df = pd.read_csv(net_cell_path) 49 | return pin_df, cell_df, net_df, pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df, fo4_df 50 | 51 | ### rename cells with cell0, cell1, ... 
and update the cell names in pin_df 52 | def rename_cells(cell_df, pin_df): 53 | ### rename cells ### 54 | cell_name = cell_df[["name"]] 55 | cell_name.loc[:, ["new_cellname"]] = ["cell" + str(i) for i in range(cell_name.shape[0])] 56 | pin_df = pin_df.merge(cell_name.rename(columns={"name":"cellname"}), on="cellname", how="left") 57 | idx = pin_df[pd.isna(pin_df.new_cellname)].index 58 | 59 | port_names = ["port" + str(i) for i in range(len(idx))] 60 | pin_df.loc[idx, "new_cellname"] = port_names 61 | cell_df["new_cellname"] = cell_name.new_cellname.values 62 | return cell_df, pin_df 63 | 64 | ### rename nets with net0, net1, ... and update the net names in pin_df 65 | def rename_nets(net_df, pin_df): 66 | ### rename nets ### 67 | net_name = net_df[["name"]] 68 | net_name.loc[:, ["new_netname"]] = ["net" + str(i) for i in range(net_name.shape[0])] 69 | pin_df = pin_df.merge(net_name.rename(columns={"name":"netname"}), on="netname", how="left") 70 | return net_df, pin_df 71 | 72 | ### 1) get edge src and tar ids and 2) generate edge_df by merging all edges 73 | def generate_edge_df_OpenROAD(pin_df, cell_df, net_df, pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df): 74 | edge_id = pd.concat([pin_df.loc[:,["id", "name"]], cell_df.loc[:,["id", "name"]], net_df.loc[:,["id", "name"]]], ignore_index=True) 75 | src = edge_id.copy() 76 | src = src.rename(columns={"id":"src_id", "name":"src"}) 77 | tar = edge_id.copy() 78 | tar = tar.rename(columns={"id":"tar_id", "name":"tar"}) 79 | 80 | pin_pin_df = pin_pin_df.merge(src, on="src", how="left") 81 | pin_pin_df = pin_pin_df.merge(tar, on="tar", how="left") 82 | 83 | cell_pin_df = cell_pin_df.merge(src, on="src", how="left") 84 | cell_pin_df = cell_pin_df.merge(tar, on="tar", how="left") 85 | 86 | net_pin_df = net_pin_df.merge(src, on="src", how="left") 87 | net_pin_df = net_pin_df.merge(tar, on="tar", how="left") 88 | 89 | net_cell_df = net_cell_df.merge(src, on="src", how="left") 90 | net_cell_df = 
net_cell_df.merge(tar, on="tar", how="left") 91 | 92 | cell_cell_df = cell_cell_df.merge(src, on="src", how="left") 93 | cell_cell_df = cell_cell_df.merge(tar, on="tar", how="left") 94 | 95 | # drop illegal edges 96 | idx = pin_pin_df[pd.isna(pin_pin_df.src_id)].index 97 | pin_pin_df = pin_pin_df.drop(idx) 98 | idx = pin_pin_df[pd.isna(pin_pin_df.tar_id)].index 99 | pin_pin_df = pin_pin_df.drop(idx) 100 | 101 | idx = cell_pin_df[pd.isna(cell_pin_df.src_id)].index 102 | cell_pin_df = cell_pin_df.drop(idx) 103 | idx = cell_pin_df[pd.isna(cell_pin_df.tar_id)].index 104 | cell_pin_df = cell_pin_df.drop(idx) 105 | 106 | idx = net_pin_df[pd.isna(net_pin_df.src_id)].index 107 | net_pin_df = net_pin_df.drop(idx) 108 | idx = net_pin_df[pd.isna(net_pin_df.tar_id)].index 109 | net_pin_df = net_pin_df.drop(idx) 110 | 111 | idx = net_cell_df[pd.isna(net_cell_df.src_id)].index 112 | net_cell_df = net_cell_df.drop(idx) 113 | idx = net_cell_df[pd.isna(net_cell_df.tar_id)].index 114 | net_cell_df = net_cell_df.drop(idx) 115 | 116 | idx = cell_cell_df[pd.isna(cell_cell_df.src_id)].index 117 | cell_cell_df = cell_cell_df.drop(idx) 118 | idx = cell_cell_df[pd.isna(cell_cell_df.tar_id)].index 119 | cell_cell_df = cell_cell_df.drop(idx) 120 | 121 | edge_df = pd.concat([pin_pin_df.loc[:,["src_id", "tar_id"]], cell_pin_df.loc[:,["src_id", "tar_id"]], \ 122 | net_pin_df.loc[:,["src_id", "tar_id"]], net_cell_df.loc[:,["src_id", "tar_id"]], \ 123 | cell_cell_df.loc[:,["src_id", "tar_id"]]], ignore_index=True) 124 | 125 | return pin_pin_df, cell_pin_df, net_pin_df, net_cell_df, cell_cell_df, edge_df 126 | 127 | def get_large_components(hist, th=2000): 128 | labels = [] 129 | for i in range(len(hist)): 130 | if hist[i] > th: 131 | labels.append(i) 132 | return labels 133 | 134 | ### generate subgraph 135 | def get_subgraph(g_old, v_mask, e_mask): 136 | u = GraphView(g_old, vfilt=v_mask, efilt=e_mask) 137 | print("connected component graph: num of edge; num of nodes", u.num_vertices(), 
u.num_edges()) 138 | ### check whether subgraph is connected and is DAG 139 | _, hist2 = label_components(u, directed=False) 140 | return u 141 | 142 | ### generate cell graph from cell ids 143 | def get_cell_graph_from_cells(u_cells, g, e_type, e_id): 144 | u_cells = np.unique(u_cells).astype(int) 145 | 146 | # add cell2cell edge 147 | v_mask_cell = g.new_vp("bool") 148 | e_mask_cell = g.new_ep("bool") 149 | v_mask_cell.a[u_cells] = True 150 | 151 | e_ar = g.get_edges(eprops=[e_type, e_id]) 152 | mask = e_ar[:,2]==4 # edge type == 4: cell2cell 153 | e_ar = e_ar[mask] 154 | e_src = e_ar[:,0] 155 | e_tar = e_ar[:,1] 156 | e_mask = (v_mask_cell.a[e_src] == True) & (v_mask_cell.a[e_tar] == True) 157 | e_mask_cell.a[e_ar[:,-1][e_mask]] = True 158 | print("num of edges to add", e_mask.sum()) 159 | print("num of edges", e_mask_cell.a.sum()) 160 | 161 | ### construct and check u_cell_g 162 | u_cell_g = get_subgraph(g, v_mask_cell, e_mask_cell) 163 | return u_cell_g 164 | 165 | 166 | --------------------------------------------------------------------------------