├── .env_example ├── .github └── workflows │ ├── main.yaml │ └── test.yaml ├── .gitignore ├── .node-version ├── README.md ├── __tests__ └── danglingPages_test.js ├── babel.config.js ├── docs ├── additional-features │ ├── blockscout.md │ ├── chainbridge │ │ ├── definitions.md │ │ ├── overview.md │ │ ├── setup-erc20-transfer.md │ │ ├── setup-erc721-transfer.md │ │ ├── setup.md │ │ ├── use-case-erc20-bridge.md │ │ └── use-case-erc721-bridge.md │ └── stress-testing.md ├── architecture │ ├── modules │ │ ├── blockchain.md │ │ ├── consensus.md │ │ ├── json-rpc.md │ │ ├── minimal.md │ │ ├── networking.md │ │ ├── other-modules.md │ │ ├── protocol.md │ │ ├── sealer.md │ │ ├── state.md │ │ ├── storage.md │ │ ├── txpool.md │ │ └── types.md │ └── overview.md ├── community │ ├── propose-new-feature.md │ └── report-bug.md ├── concepts │ └── ethereum-state.md ├── configuration │ ├── enable-metrics.mdx │ ├── manage-private-keys.md │ ├── sample-config.md │ └── secret-managers │ │ ├── set-up-aws-ssm.md │ │ ├── set-up-gcp-secrets-manager.md │ │ └── set-up-hashicorp-vault.md ├── consensus │ ├── migration-to-pos.md │ ├── poa.md │ ├── pos-concepts.md │ └── pos-stake-unstake.md ├── get-started │ ├── cli-commands.mdx │ ├── installation.md │ ├── json-rpc-commands.mdx │ ├── set-up-ibft-locally.md │ └── set-up-ibft-on-the-cloud.md ├── overview.md ├── performance-reports │ ├── overview.md │ └── test-history │ │ ├── test-2022-01-21.md │ │ ├── test-2022-03-02.md │ │ ├── test-2022-03-23.md │ │ └── test-2022-07-04.md └── working-with-node │ ├── backup-restore.md │ ├── query-json-rpc.md │ └── query-operator-info.md ├── docusaurus.config.js ├── package.json ├── scrapper_config.json ├── sidebars.js ├── src ├── css │ └── custom.css └── pages │ ├── markdown-page.md │ └── styles.module.css ├── static ├── .nojekyll ├── img │ ├── Architecture.jpg │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ ├── apple-touch-icon.png │ ├── browserconfig.xml │ ├── chainbridge │ │ ├── architecture.svg │ 
│ └── erc20-workflow.svg │ ├── docusaurus.png │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── ibft-setup │ │ ├── cloud.svg │ │ └── local.svg │ ├── logo.svg │ ├── mstile-150x150.png │ ├── safari-pinned-tab.svg │ ├── site.webmanifest │ ├── state │ │ ├── accountState.png │ │ ├── block.png │ │ ├── mainDiagram.png │ │ ├── merkleTree.png │ │ └── worldState.png │ ├── txpool-error-1.png │ ├── txpool-error-2.png │ ├── undraw_docusaurus_mountain.svg │ ├── undraw_docusaurus_react.svg │ └── undraw_docusaurus_tree.svg └── index.html ├── tsconfig.json └── yarn.lock /.env_example: -------------------------------------------------------------------------------- 1 | TYPESENSE_API_KEY= -------------------------------------------------------------------------------- /.github/workflows/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | run_scraper: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - 11 | name: Checkout code 12 | uses: actions/checkout@v2 13 | - 14 | name: Run docker scraper 15 | run: | 16 | docker run -e "TYPESENSE_API_KEY=${{ secrets.TYPESENSE_API_KEY }}" -e "TYPESENSE_HOST=${{ secrets.TYPESENSE_HOST }}" -e "TYPESENSE_PORT=443" -e "TYPESENSE_PROTOCOL=https" -e "CONFIG=$(cat $GITHUB_WORKSPACE/scrapper_config.json | jq -r tostring)" typesense/docsearch-scraper 17 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | on: 3 | pull_request: 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Setup NodeJS 10 | uses: actions/setup-node@v2 11 | 12 | - name: Checkout code 13 | uses: actions/checkout@v2 14 | 15 | - name: Install required dependencies 16 | run: npm i 17 | - name: Run tests 18 | run: npm run test -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | /build 4 | 5 | # Generated files 6 | .docusaurus 7 | .cache-loader 8 | 9 | .idea 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | 22 | # Prettier 23 | .prettierrc 24 | 25 | #Environment variables 26 | .env -------------------------------------------------------------------------------- /.node-version: -------------------------------------------------------------------------------- 1 | 14 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Polygon Edge Docs 2 | 3 | The Polygon Edge docs have been migrated to https://github.com/maticnetwork/matic-docs 4 | 5 | This repo is archived. 6 | -------------------------------------------------------------------------------- /__tests__/danglingPages_test.js: -------------------------------------------------------------------------------- 1 | const sideBarItems = require("../sidebars.js") 2 | const glob = require("glob"); 3 | 4 | var getFilePaths = (src, callback) => { 5 | glob(src + '/**/*.md*', callback); 6 | }; 7 | 8 | const extractItemsFromSidebar = (data, outputArray) => { 9 | data.forEach(element => { 10 | if (element.items != undefined) { 11 | extractItemsFromSidebar(element.items, outputArray); 12 | } else { 13 | outputArray.push(element); 14 | } 15 | }); 16 | } 17 | 18 | test("Should not have pages outside sidebar", done => { 19 | return getFilePaths('./docs', (err, filePaths) => { 20 | let danglingPages = [] 21 | try { 22 | if (err) { 23 | done(err); 24 | } else { 25 | let modifiedFilePaths = []; 26 | 27 | // remove root folder from file path and file extension 28 | for (let filepath of filePaths) { 29 | let modifiedFilePath= 
filepath.substring(7).replace(/(.mdx|.md)$/, ""); 30 | modifiedFilePaths.push(modifiedFilePath) 31 | } 32 | 33 | var filePathsFromSidebar = []; 34 | 35 | //Extract individual file paths from sidebar tree 36 | extractItemsFromSidebar(sideBarItems.develop, filePathsFromSidebar); 37 | 38 | //Find dangling files 39 | for (const filePath of modifiedFilePaths) { 40 | if (filePathsFromSidebar.indexOf(filePath) == -1) { 41 | danglingPages.push(filePath) 42 | } 43 | } 44 | expect(danglingPages.length).toEqual(0); 45 | done(); 46 | } 47 | } catch (error) { 48 | if(error.matcherResult != undefined){ 49 | error = new Error("Not all files are in the sidebar tree: \n\n" + danglingPages.join('\n').toString()) 50 | } 51 | done(error); 52 | } 53 | 54 | }); 55 | }); -------------------------------------------------------------------------------- /babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = function(api){ 2 | if (api.env("test")) { 3 | return { 4 | presets: ["@babel/preset-env"], 5 | } 6 | } 7 | return { 8 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')], 9 | } 10 | }; 11 | -------------------------------------------------------------------------------- /docs/additional-features/blockscout.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: blockscout 3 | title: Blockscout 4 | --- 5 | 6 | ## Overview 7 | This guide goes into details on how to compile and deploy Blockscout instance to work with Polygon-Edge. 8 | Blockscout has its own [documentation](https://docs.blockscout.com/for-developers/manual-deployment), but this guide focuses on simple but detailed step-by-step instructions on how to setup Blockscout instance. 
9 | 10 | ## Environment 11 | * Operating System: Ubuntu Server 20.04 LTS [download link](https://releases.ubuntu.com/20.04/) with sudo permissions 12 | * Server Hardware: 2CPU / 4GB RAM / 50GB HDD (LVM) 13 | * Database Server: Dedicated server with 2 CPU / 4GB RAM / 30GB SSD / PostresSQL 13.4 14 | 15 | ### DB Server 16 | The requirement for following this guide is to have a database server ready, database and db user configured. 17 | This guide will not go into details on how to deploy and configure PosgreSQL server. 18 | There are plenty of guides on now to do this, for example [DigitalOcean Guide](https://www.digitalocean.com/community/tutorials/how-to-install-postgresql-on-ubuntu-20-04-quickstart) 19 | 20 | :::info DISCLAMER 21 | This guide is meant only to help you to get Blockscout up and running on a single instance which is not ideal production setup. 22 | For production, you'll probably want to introduce reverse proxy, load balancer, scalability options, etc. into the architecture. 23 | ::: 24 | 25 | # Blockscout Deployment Procedure 26 | 27 | ## Part 1 - install dependancies 28 | Before we start we need to make sure we have all the binaries installed that the blockscout is dependent on. 
29 | 30 | ### Update & upgrade system 31 | ```bash 32 | sudo apt update && sudo apt -y upgrade 33 | ``` 34 | 35 | ### Install erlang and its dependancies from default packages 36 | ```bash 37 | sudo apt -y install erlang 38 | ``` 39 | 40 | ### Add erlang repos 41 | ```bash 42 | # go to your home dir 43 | cd ~ 44 | # download deb 45 | wget https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb 46 | # download key 47 | wget https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc 48 | # install repo 49 | sudo dpkg -i erlang-solutions_2.0_all.deb 50 | # install key 51 | sudo apt-key add erlang_solutions.asc 52 | # remove deb 53 | rm erlang-solutions_2.0_all.deb 54 | # remove key 55 | rm erlang_solutions.asc 56 | ``` 57 | 58 | ### Add NodeJS repo 59 | ```bash 60 | sudo curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash - 61 | ``` 62 | 63 | ### Install Rust 64 | ```bash 65 | sudo curl https://sh.rustup.rs -sSf | sh -s -- -y 66 | ``` 67 | 68 | ### Install specific versions of Erlang and Elixir 69 | ```bash 70 | sudo apt -y install esl-erlang=1:24.* elixir=1.12.* 71 | ``` 72 | 73 | ### Install NodeJS 74 | ```bash 75 | sudo apt -y install nodejs 76 | ``` 77 | 78 | ### Install Cargo 79 | ```bash 80 | sudo apt -y install cargo 81 | ``` 82 | 83 | ### Install other dependancies 84 | ```bash 85 | sudo apt -y install automake libtool inotify-tools gcc libgmp-dev make g++ git 86 | ``` 87 | 88 | ### Optionaly install postgresql client to check your db connection 89 | ```bash 90 | sudo apt -y postgresql-client 91 | ``` 92 | 93 | ## Part 2 - set environment variables 94 | We need to set the environment variables, before we begin with Blockscout compilation. 95 | In this guide we'll set only the basic minimum to get it working. 
96 | Full list of variables that can be set you can find [here](https://docs.blockscout.com/for-developers/information-and-settings/env-variables) 97 | 98 | ### Set env vars 99 | ```bash 100 | # example: ETHEREUM_JSONRPC_HTTP_URL=https://rpc.poa.psdk.io:8545 101 | export ETHEREUM_JSONRPC_HTTP_URL= 102 | # example: ETHEREUM_JSONRPC_TRACE_URL=https://rpc.poa.psdk.io:8545 103 | export ETHEREUM_JSONRPC_TRACE_URL= 104 | # example: ETHEREUM_JSONRPC_WS_URL=wss://rpc.poa.psdk.io:8545/ws 105 | export ETHEREUM_JSONRPC_WS_URL= 106 | # used for automaticaly restarting the service if it crashes 107 | export HEART_COMMAND="systemctl start explorer.service" 108 | # postgresql connection example: DATABASE_URL=postgresql://blockscout:Passw0Rd@db.instance.local:5432/blockscout 109 | export DATABASE_URL=postgresql://:@:/ 110 | # secret key base as per docs https://docs.blockscout.com/for-developers/manual-deployment ( Step 4 ) 111 | export SECRET_KEY_BASE=VTIB3uHDNbvrY0+60ZWgUoUBKDn9ppLR8MI4CpRz4/qLyEFs54ktJfaNT6Z221No 112 | 113 | # we set these env vars to test the db connection 114 | export PGPASSWORD=Passw0Rd 115 | export PGUSER=blockscout 116 | export PGHOST=db.instance.local 117 | ``` 118 | 119 | Now test your DB connection with provided parameters. 120 | Since you've provided PG env vars, you should be able to connect to the database only by running: 121 | ```bash 122 | psql 123 | ``` 124 | 125 | If the database is configured correctly, you should see a psql prompt: 126 | ```bash 127 | psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)) 128 | SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off) 129 | Type "help" for help. 
130 | 131 | blockscout=> 132 | ``` 133 | 134 | Otherwise you might see an error like this: 135 | ```bash 136 | psql: error: FATAL: password authentication failed for user "blockscout" 137 | FATAL: password authentication failed for user "blockscout" 138 | ``` 139 | If this is the case [these docs](https://ubuntu.com/server/docs/databases-postgresql) might help you. 140 | 141 | :::info DB Connection 142 | Make sure you've sorted out all db connection issues before proceeding to the next part. 143 | You'll need to provide superuser privileges to blockscout user. 144 | ::: 145 | ```bash 146 | postgres@ubuntu:~$ createuser --interactive 147 | Enter name of role to add: blockscout 148 | Shall the new role be a superuser? (y/n) y 149 | ``` 150 | 151 | ## Part 3 - clone and compile Blockscout 152 | Now we finaly get to start the Blockscout installation. 153 | 154 | ### Clone Blockscout repo 155 | ```bash 156 | cd ~ 157 | git clone https://github.com/poanetwork/blockscout.git 158 | ``` 159 | 160 | ### Compile 161 | Cd into clone directory and start compiling 162 | 163 | ```bash 164 | cd blockcout 165 | mix local.hex --force 166 | mix do deps.get, local.rebar --force, deps.compile, compile 167 | ``` 168 | 169 | ### Migrate databases 170 | :::info 171 | This part will fail if you didn't setup your DB connection properly, you didn't provide or you've defined wrong parameters at DATABASE_URL environment variable. 172 | The database user needs to have superuser privileges. 173 | ::: 174 | ```bash 175 | mix do ecto.create, ecto.migrate 176 | ``` 177 | 178 | ### Install npm dependancies and compile frontend assets 179 | You need to change directory to the folder which contains frontend assets. 180 | 181 | ```bash 182 | cd apps/block_scout_web/assets 183 | sudo npm install 184 | sudo node_modules/webpack/bin/webpack.js --mode production 185 | ``` 186 | 187 | :::info Be patient 188 | Compilation of these assets can take a few minutes, and it will display no output. 
189 | It can look like the process is stuck, but just be patient. 190 | When compile process is finished, it should output something like: `webpack 5.69.1 compiled with 3 warnings in 104942 ms` 191 | ::: 192 | 193 | ### Build static assets 194 | For this step you need to return to the root of your Blockscout clone folder. 195 | ```bash 196 | cd ~/blockscout 197 | sudo mix phx.digest 198 | ``` 199 | 200 | ### Generate self-signed certificates 201 | ```bash 202 | cd apps/block_scout_web 203 | mix phx.gen.cert blockscout blockscout.local 204 | ``` 205 | 206 | ## Part 4 - create and run Blockscout service 207 | In this part we need to setup a system service as we want Blockscout to run in the backround and persist after system reboot. 208 | 209 | ### Create service file 210 | ```bash 211 | sudo touch /etc/systemd/system/explorer.service 212 | ``` 213 | 214 | ### Edit service file 215 | Use your favorite linux text editor to edit this file and configure the service. 216 | ```bash 217 | sudo vi /etc/systemd/system/explorer.service 218 | ``` 219 | The contents of the explorer.service file should look like this: 220 | ``` 221 | [Unit] 222 | Description=Blockscout Server 223 | After=network.target 224 | StartLimitIntervalSec=0 225 | 226 | [Service] 227 | Type=simple 228 | Restart=always 229 | RestartSec=1 230 | User=root 231 | StandardOutput=syslog 232 | StandardError=syslog 233 | WorkingDirectory=/usr/local/blockscout 234 | ExecStart=/usr/bin/mix phx.server 235 | EnvironmentFile=/usr/local/blockscout/env_vars.env 236 | 237 | [Install] 238 | WantedBy=multi-user.target 239 | ``` 240 | 241 | ### Enable starting service on system boot 242 | ```bash 243 | sudo systemctl daemon-reload 244 | sudo systemctl enable explorer.service 245 | ``` 246 | 247 | ### Move your Blockscout clone folder to system wide location 248 | Blockscout service needs to have access to the folder you've cloned from Blockscout repo and compiled all the assets. 
249 | ```bash 250 | sudo mv ~/blockscout /usr/local 251 | ``` 252 | 253 | ### Create env vars file which will be used by Blockscout service 254 | :::info 255 | Use the same environment variables as you've set in Part 2. 256 | ::: 257 | 258 | ```bash 259 | sudo touch /usr/local/blockscout/env_vars.env 260 | # use your favorite text editor 261 | sudo vi /usr/local/blockscout/env_vars.env 262 | 263 | # env_vars.env file should hold these values ( adjusted for your environment ) 264 | ETHEREUM_JSONRPC_HTTP_URL=https://rpc.poa.psdk.io:8545 265 | ETHEREUM_JSONRPC_TRACE_URL=https://rpc.poa.psdk.io:8545 266 | DATABASE_URL=postgresql://blockscout:Passw0Rd@db.instance.local:5432/blockscout 267 | SECRET_KEY_BASE=VTIB3uHDNbvrY0+60ZWgUoUBKDn9ppLR8MI4CpRz4/qLyEFs54ktJfaNT6Z221No 268 | HEART_COMMAND="systemctl start explorer.service" 269 | ``` 270 | Save the file and exit. 271 | 272 | ### Finaly start Blockscout service 273 | ```bash 274 | sudo systemctl start explorer.service 275 | ``` 276 | 277 | ## Part 5 - test out the functionality of your Blockscout instance 278 | Now all thats left to do is to check if Blockscout service is running. 279 | Check service status with: 280 | ```bash 281 | sudo systemctl status explorer.service 282 | ``` 283 | 284 | To check service output: 285 | ```bash 286 | sudo journalctl -u explorer.service 287 | ``` 288 | 289 | You can check if there are some new listening ports: 290 | ```bash 291 | # if netstat is not installed 292 | sudo apt install net-tools 293 | sudo netstat -tulpn 294 | ``` 295 | 296 | You should get a list of listening ports and on the list there should be something like this: 297 | ``` 298 | tcp 0 0 0.0.0.0:5432 0.0.0.0:* LISTEN 28142/postgres 299 | tcp 0 0 0.0.0.0:4000 0.0.0.0:* LISTEN 42148/beam.smp 300 | tcp 0 0 0.0.0.0:4001 0.0.0.0:* LISTEN 42148/beam.smp 301 | ``` 302 | 303 | Blockscout web service runs by default on ports `4000`(http) and `4001`(https). 
304 | If everythig is ok, you should be able to access the Blockscout web portal with `http://:4000` or `https://:4001` 305 | 306 | 307 | 308 | ## Final thoughts 309 | We've just deployed a single Blockscout instance, which works fine, but for production you should consider placing this instance behind a reverse proxy like Nginx. 310 | You sould also think about database and instance scalability, depending on your use case. 311 | 312 | You should definitely checkout the official [Blockscout documentation](https://docs.blockscout.com/) as there a lot of customisation options. -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/definitions.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: definitions 3 | title: General Definitions 4 | --- 5 | 6 | 7 | ## Relayer 8 | Chainbridge is a relayer type bridge. The role of a relayer is to vote for the execution of a request (how many tokens to burn/release, for example). 9 | It monitors events from every chain, and votes for a proposal in the Bridge contract of the destination chain when it receives a `Deposit` event from a chain. A relayer calls a method in the Bridge contract to execute the proposal after the required number of votes are submitted. The bridge delegates execution to the Handler contract. 10 | 11 | 12 | ## Types of contracts 13 | In ChainBridge, there are three types of contracts on each chain, called Bridge/Handler/Target. 14 | 15 | | **Type** |**Description** | 16 | |----------|-------------------------------------------------------------------------------------------------------------------------------| 17 | | Bridge contract | A Bridge contract that manages requests, votes, executions needs to be deployed in each chain. Users will call `deposit` in Bridge to start a transfer, and Bridge delegates the process to the Handler contract corresponding to the Target contract. 
Once the Handler contract has been successful in calling the Target contract, Bridge contract emits a `Deposit` event to notify relayers.| 18 | | Handler contract | This contract interacts with the Target contract to execute a deposit or proposal. It validates the user's request, calls the Target contract and helps with some settings for the Target contract. There are certain Handler contracts to call each Target contract that has a different interface. The indirect calls by the Handler contract make the bridge to enable the transfer of whatever kind of assets or data. Currently, there are three types of Handler contracts implemented by ChainBridge: ERC20Handler, ERC721Handler, and GenericHandler. | 19 | | Target contract | A contract that manages assets to be exchanged or the messages that are transferred between chains. The interaction with this contract will be made from each side of the bridge. | 20 | 21 |
22 | 23 | ![ChainBridge Architecture](/img/chainbridge/architecture.svg) 24 | *ChainBridge Architecture* 25 | 26 |
27 | 28 |
29 | 30 | ![Workflow of ERC20 token transfer](/img/chainbridge/erc20-workflow.svg) 31 | *ex. Workflow of an ERC20 token transfer* 32 | 33 |
34 | 35 | ## Types of accounts 36 | 37 | Please make sure the accounts have enough native tokens to create transactions before starting. In Polygon Edge, you can assign premined balances to accounts when generating the genesis block. 38 | 39 | | **Type** |**Description** | 40 | |----------|-------------------------------------------------------------------------------------------------------------------------------| 41 | | Admin | This account will be given the admin role as default. | 42 | | User | The sender/recipient account that sends/receives assets. The sender account pays the gas fees when approving token transfers and calling deposit in the Bridge contract to begin a transfer. | 43 | 44 | :::info The admin role 45 | Certain actions can only be performed by the admin role account. By default, the deployer of the Bridge contract has the admin role. You will find below how to grant the admin role to another account or to remove it. 46 | 47 | ### Add admin role 48 | 49 | Adds an admin 50 | 51 | ```bash 52 | # Grant admin role 53 | $ cb-sol-cli admin add-admin \ 54 | --url [JSON_RPC_URL] \ 55 | --privateKey [PRIVATE_KEY] \ 56 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 57 | --admin "[NEW_ACCOUNT_ADDRESS]" 58 | ``` 59 | ### Revoke admin role 60 | 61 | Removes an admin 62 | 63 | ```bash 64 | # Revoke admin role 65 | $ cb-sol-cli admin remove-admin \ 66 | --url [JSON_RPC_URL] \ 67 | --privateKey [PRIVATE_KEY] \ 68 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 69 | --admin "[NEW_ACCOUNT_ADDRESS]" 70 | ``` 71 | 72 | ## The operations which are allowed by the `admin` account are as below. 73 | 74 | ### Set Resource 75 | 76 | Register a resource ID with a contract address for a handler. 
77 | 78 | ```bash 79 | # Register new resource 80 | $ cb-sol-cli bridge register-resource \ 81 | --url [JSON_RPC_URL] \ 82 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 83 | --resourceId "[RESOURCE_ID]" \ 84 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 85 | --handler "[HANDLER_CONTRACT_ADDRESS]" \ 86 | --targetContract "[TARGET_CONTRACT_ADDRESS]" 87 | ``` 88 | 89 | ### Make contract burnable/mintable 90 | 91 | Set a token contract as mintable/burnable in a handler. 92 | 93 | ```bash 94 | # Let contract burnable/mintable 95 | $ cb-sol-cli bridge set-burn \ 96 | --url [JSON_RPC_URL] \ 97 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 98 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 99 | --handler "[HANDLER_CONTRACT_ADDRESS]" \ 100 | --tokenContract "[TARGET_CONTRACT_ADDRESS]" 101 | ``` 102 | 103 | ### Cancel proposal 104 | 105 | Cancel proposal for execution 106 | 107 | ```bash 108 | # Cancel ongoing proposal 109 | $ cb-sol-cli bridge cancel-proposal \ 110 | --url [JSON_RPC_URL] \ 111 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 112 | --resourceId "[RESOURCE_ID]" \ 113 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 114 | --chainId "[CHAIN_ID_OF_SOURCE_CHAIN]" \ 115 | --depositNonce "[NONCE]" 116 | ``` 117 | 118 | ### Pause/Unpause 119 | 120 | Pause deposits, proposal creation, voting, and deposit executions temporally. 
121 | 122 | ```bash 123 | # Pause 124 | $ cb-sol-cli admin pause \ 125 | --url [JSON_RPC_URL] \ 126 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 127 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" 128 | 129 | # Unpause 130 | $ cb-sol-cli admin unpause \ 131 | --url [JSON_RPC_URL] \ 132 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 133 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" 134 | ``` 135 | 136 | ### Change Fee 137 | 138 | Change the fee which will be paid to Bridge Contract 139 | 140 | ```bash 141 | # Change fee for execution 142 | $ cb-sol-cli admin set-fee \ 143 | --url [JSON_RPC_URL] \ 144 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 145 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 146 | --fee [FEE_IN_WEI] 147 | ``` 148 | 149 | ### Add/Remove a relayer 150 | 151 | Add an account as a new relayer or remove an account from relayers 152 | 153 | ```bash 154 | # Add relayer 155 | $ cb-sol-cli admin add-relayer \ 156 | --url [JSON_RPC_URL] \ 157 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 158 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 159 | --relayer "[NEW_RELAYER_ADDRESS]" 160 | 161 | # Remove relayer 162 | $ cb-sol-cli admin remove-relayer \ 163 | --url [JSON_RPC_URL] \ 164 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 165 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 166 | --relayer "[RELAYER_ADDRESS]" 167 | ``` 168 | 169 | ### Change relayer threshold 170 | 171 | Change the number of votes required for a proposal execution 172 | 173 | ```bash 174 | # Remove relayer 175 | $ cb-sol-cli admin set-threshold \ 176 | --url [JSON_RPC_URL] \ 177 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 178 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 179 | --threshold [THRESHOLD] 180 | ``` 181 | ::: 182 | 183 | ## Chain ID 184 | 185 | The Chainbridge `chainId` is an arbitrary value used in the bridge for differentiating between the blockchain networks, and it has to be in the range of uint8. To not be confused with the chain ID of the network, they are not the same thing. 
This value needs to be unique, but it doesn't have to be the same as the ID of the network. 186 | 187 | In this example, we set `99` in `chainId`, because the chain ID of the Mumbai testnet is `80001`, which cannot be represented with a uint8. 188 | 189 | ## Resource ID 190 | 191 | A resource ID is a unique 32-bytes value in a cross-chain environment, associated with a certain asset (resource) that is being transferred between networks. 192 | 193 | The resource ID is arbitrary, but, as a convention, usually the last byte contains the chain ID of the source chain (the network from which this asset originated from). 194 | 195 | ## JSON-RPC URL for Polygon PoS 196 | 197 | For this guide, we’ll use https://rpc-mumbai.matic.today, a public JSON-RPC URL provided by Polygon, which may have traffic or rate-limits. This will be used only to connect with the Polygon Mumbai testnet. We advise you to obtain your JSON-RPC URL by an external service like Infura because deploying contracts will send many queries/requests to the JSON-RPC. 198 | 199 | ## Ways of processing the transfer of tokens 200 | When transferring ERC20 tokens between chains, they can be processed in two different modes: 201 | 202 | ### Lock/release mode 203 | Source chain: The tokens you are sending will be locked in the Handler Contract.
204 | Destination chain: The same amount of tokens as you sent in the source chain would be unlocked and transferred from the Handler contract to the recipient account in the destination chain. 205 | 206 | ### Burn/mint mode 207 | Source chain: The tokens you are sending will be burned.
208 | Destination chain: The same amount of tokens that you sent and burned on the source chain will be minted on the destination chain and sent to the recipient account. 209 | 210 | You can use different modes on each chain. It means that you can lock a token in the main chain while minting a token in the subchain for transfer. For instance, it may make sense to lock/release tokens if the total supply or mint schedule is controlled. Tokens would be minted/burned if the contract in the subchain has to follow the supply in the main chain. 211 | 212 | The default mode is lock/release mode. If you want to make the Tokens mintable/burnable, you need to call `adminSetBurnable` method. If you want to mint tokens on execution, you will need to grant `minter` role to the ERC20 Handler contract. 213 | 214 | 215 | -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/overview.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: overview 3 | title: Overview 4 | --- 5 | 6 | ## What is ChainBridge? 7 | 8 | ChainBridge is a modular multi-directional blockchain bridge supporting EVM and Substrate compatible chains, built by ChainSafe. It allows users to transfer all kinds of assets or messages between two different chains. 9 | 10 | To find out more about ChainBridge, please first visit the [official docs](https://chainbridge.chainsafe.io/) provided by its developers. 11 | 12 | This guide is intended to help with the Chainbridge integration to Polygon Edge. It walks through the setup of a bridge between a running Polygon PoS (Mumbai testnet) and a local Polygon Edge network. 
13 | 14 | ## Requirements 15 | 16 | In this guide, you will run Polygon Edge nodes, a ChainBridge relayer (more about it [here](/docs/additional-features/chainbridge/definitions)), and the cb-sol-cli tool, which is a CLI tool to deploy contracts locally, registering resource, and changing settings for the bridge (you can check [this](https://chainbridge.chainsafe.io/cli-options/#cli-options) too). The following environments are required before starting the setup: 17 | 18 | * Go: >= 1.17 19 | * Node.js >= 16.13.0 20 | * Git 21 | 22 | 23 | In addition, you will need to clone the following repositories with the versions to run some applications. 24 | 25 | * [Polygon Edge](https://github.com/0xPolygon/polygon-edge): on the `develop` branch 26 | * [ChainBridge](https://github.com/ChainSafe/ChainBridge): v1.1.5 27 | * [ChainBridge Deploy Tools](https://github.com/ChainSafe/chainbridge-deploy): `f2aa093` on `main` branch 28 | 29 | 30 | You need to set up a Polygon Edge network before proceeding to the next section. Please check [Local Setup](/docs/get-started/set-up-ibft-locally) or [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) for more details. -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/setup-erc20-transfer.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: setup-erc20-transfer 3 | title: ERC20 Token Transfer 4 | --- 5 | 6 | So far, we've set up bridge to exchange assets/data between Polygon PoS and Polygon Edge chain. This section will guide you to set up an ERC20 bridge and send tokens between different blockchains. 7 | 8 | ## Step 1: Register resource ID 9 | 10 | Firstly, you will register a resource ID that associates resources in a cross-chain environment. A Resource ID is a 32-bytes value that must be unique to the resource that we are transferring between these blockchains. 
The Resource IDs are arbitrary, but they may have the chain ID of the home chain in the last byte, as a convention (home chain referring to the network on which these resources originated from). 11 | 12 | To register resource ID, you can use the `cb-sol-cli bridge register-resource` command. You will need to give the private key of the `admin` account. 13 | 14 | ```bash 15 | # For Polygon PoS chain 16 | $ cb-sol-cli bridge register-resource \ 17 | --url https://rpc-mumbai.matic.today \ 18 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 19 | --gasPrice [GAS_PRICE] \ 20 | # Set Resource ID for ERC20 21 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" \ 22 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 23 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 24 | --targetContract "[ERC20_CONTRACT_ADDRESS]" 25 | 26 | # For Polygon Edge chain 27 | $ cb-sol-cli bridge register-resource \ 28 | --url http://localhost:10002 \ 29 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 30 | # Set Resource ID for ERC20 31 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" \ 32 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 33 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 34 | --targetContract "[ERC20_CONTRACT_ADDRESS]" 35 | ``` 36 | 37 | ## (Optional) Make contracts mintable/burnable 38 | 39 | 40 | ```bash 41 | # Let ERC20 contract burn on source chain and mint on destination chain 42 | $ cb-sol-cli bridge set-burn \ 43 | --url http://localhost:10002 \ 44 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 45 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 46 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 47 | --tokenContract "[ERC20_CONTRACT_ADDRESS]" 48 | 49 | # Grant minter role to ERC20 Handler contract 50 | $ cb-sol-cli erc20 add-minter \ 51 | --url http://localhost:10002 \ 52 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 53 | --erc20Address "[ERC20_CONTRACT_ADDRESS]" \ 54 | --minter "[ERC20_HANDLER_CONTRACT_ADDRESS]" 55 | ``` 56 | 57 | ## Step 
2: Transfer ERC20 Token 58 | 59 | We will send ERC20 Tokens from the Polygon PoS chain to the Polygon Edge chain. 60 | 61 | First, you will get tokens by minting. An account with the `minter` role can mint new tokens. The account that has deployed the ERC20 contract has the `minter` role by default. To specify other accounts as members of the `minter` role, you need to run the `cb-sol-cli erc20 add-minter` command. 62 | 63 | ```bash 64 | # Mint ERC20 tokens 65 | $ cb-sol-cli erc20 mint \ 66 | --url https://rpc-mumbai.matic.today \ 67 | --privateKey [MINTER_ACCOUNT_PRIVATE_KEY] \ 68 | --gasPrice [GAS_PRICE] \ 69 | --erc20Address "[ERC20_CONTRACT_ADDRESS]" \ 70 | --amount 1000 71 | ``` 72 | 73 | To check the current balance, you can use `cb-sol-cli erc20 balance` command. 74 | 75 | ```bash 76 | # Check ERC20 token balance 77 | $ cb-sol-cli erc20 balance \ 78 | --url https://rpc-mumbai.matic.today \ 79 | --erc20Address "[ERC20_CONTRACT_ADDRESS]" \ 80 | --address "[ACCOUNT_ADDRESS]" 81 | 82 | [erc20/balance] Account has a balance of 1000.0 83 | ``` 84 | 85 | Next, you need to approve ERC20 token transfer from the account by ERC20 Handler 86 | 87 | ```bash 88 | # Approve transfer from the account by ERC20 Handler 89 | $ cb-sol-cli erc20 approve \ 90 | --url https://rpc-mumbai.matic.today \ 91 | --privateKey [USER_ACCOUNT_PRIVATE_KEY] \ 92 | --gasPrice [GAS_PRICE] \ 93 | --erc20Address "[ERC20_CONTRACT_ADDRESS]" \ 94 | --recipient "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 95 | --amount 500 96 | ``` 97 | 98 | To transfer tokens to Polygon Edge chains, you will call `deposit`. 
99 | 100 | ```bash 101 | # Start transfer from Polygon PoS to Polygon Edge chain 102 | $ cb-sol-cli erc20 deposit \ 103 | --url https://rpc-mumbai.matic.today \ 104 | --privateKey [PRIVATE_KEY] \ 105 | --gasPrice [GAS_PRICE] \ 106 | --amount 10 \ 107 | # ChainID of Polygon Edge chain 108 | --dest 100 \ 109 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 110 | --recipient "[RECIPIENT_ADDRESS_IN_POLYGON_EDGE_CHAIN]" \ 111 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" 112 | ``` 113 | 114 | After the deposit transaction was successful, the relayer will get the event and vote for the proposal. It executes a transaction to send tokens to the recipient account in the Polygon Edge chain after the required number of votes are submitted. 115 | 116 | ```bash 117 | INFO[11-19|08:15:58] Handling fungible deposit event chain=mumbai dest=100 nonce=1 118 | INFO[11-19|08:15:59] Attempting to resolve message chain=polygon-edge type=FungibleTransfer src=99 dst=100 nonce=1 rId=000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00 119 | INFO[11-19|08:15:59] Creating erc20 proposal chain=polygon-edge src=99 nonce=1 120 | INFO[11-19|08:15:59] Watching for finalization event chain=polygon-edge src=99 nonce=1 121 | INFO[11-19|08:15:59] Submitted proposal vote chain=polygon-edge tx=0x67a97849951cdf0480e24a95f59adc65ae75da23d00b4ab22e917a2ad2fa940d src=99 depositNonce=1 gasPrice=1 122 | INFO[11-19|08:16:24] Submitted proposal execution chain=polygon-edge tx=0x63615a775a55fcb00676a40e3c9025eeefec94d0c32ee14548891b71f8d1aad1 src=99 dst=100 nonce=1 gasPrice=5 123 | ``` 124 | 125 | Once the execution transaction has been successful, you will get tokens in the Polygon Edge chain. 
126 | 127 | ```bash 128 | # Check the ERC20 balance in Polygon Edge chain 129 | $ cb-sol-cli erc20 balance \ 130 | --url http://localhost:10002 \ 131 | --privateKey [PRIVATE_KEY] \ 132 | --erc20Address "[ERC20_CONTRACT_ADDRESS]" \ 133 | --address "[ACCOUNT_ADDRESS]" 134 | 135 | [erc20/balance] Account has a balance of 10.0 136 | ``` 137 | -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/setup-erc721-transfer.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: setup-erc721-transfer 3 | title: ERC721 NFT Transfer 4 | --- 5 | 6 | This section guides you through setting up an ERC721 bridge and sending NFTs between blockchain networks. 7 | 8 | ## Step 1: Register resource ID 9 | 10 | You will first need to register the resource ID for the ERC721 token in the Bridge contracts on both chains. 11 | 12 | ```bash 13 | # For Polygon PoS chain 14 | $ cb-sol-cli bridge register-resource \ 15 | --url https://rpc-mumbai.matic.today \ 16 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 17 | --gasPrice [GAS_PRICE] \ 18 | # Set ResourceID for ERC721 Token 19 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" \ 20 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 21 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 22 | --targetContract "[ERC721_CONTRACT_ADDRESS]" 23 | 24 | # For Polygon Edge chain 25 | $ cb-sol-cli bridge register-resource \ 26 | --url http://localhost:10002 \ 27 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 28 | # Set ResourceID for ERC721 Token 29 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" \ 30 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 31 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 32 | --targetContract "[ERC721_CONTRACT_ADDRESS]" 33 | ``` 34 | 35 | ## (Optional): Make contracts mintable/burnable 36 | 37 | To make the Tokens mintable/burnable, you will need to call the following 
commands: 38 | 39 | ```bash 40 | # Let ERC721 contract burn on source chain or mint on destination chain 41 | $ cb-sol-cli bridge set-burn \ 42 | --url http://localhost:10002 \ 43 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 44 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 45 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 46 | --tokenContract "[ERC721_CONTRACT_ADDRESS]" 47 | 48 | # Grant minter role to ERC721 Handler contract (Only if you want to mint) 49 | $ cb-sol-cli erc721 add-minter \ 50 | --url http://localhost:10002 \ 51 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 52 | --erc721Address "[ERC721_CONTRACT_ADDRESS]" \ 53 | --minter "[ERC721_HANDLER_CONTRACT_ADDRESS]" 54 | ``` 55 | 56 | ## Step 2: Transfer NFT 57 | 58 | Firstly, you will mint an NFT if you need it. 59 | 60 | ```bash 61 | # Mint NFT 0x50 62 | $ cb-sol-cli erc721 mint \ 63 | --url https://rpc-mumbai.matic.today \ 64 | --privateKey [MINTER_ROLE_ACCOUNT] \ 65 | --gasPrice [GAS_PRICE] \ 66 | --erc721Address "[ERC721_CONTRACT_ADDRESS]" \ 67 | --id 0x50 68 | ``` 69 | 70 | To check the NFT owner, you can use `cb-sol-cli erc721 owner` 71 | 72 | ```bash 73 | # Check the current owner of NFT 74 | $ cb-sol-cli erc721 owner \ 75 | --url https://rpc-mumbai.matic.today \ 76 | --erc721Address "[ERC721_CONTRACT_ADDRESS]" \ 77 | --id 0x50 78 | ``` 79 | 80 | Then, you will approve a transfer of the NFT by ERC721 Handler 81 | 82 | ```bash 83 | # Approve transfer of the NFT 0x50 by ERC721 Handler 84 | $ cb-sol-cli erc721 approve \ 85 | --url https://rpc-mumbai.matic.today \ 86 | --privateKey [PRIVATE_KEY] \ 87 | --gasPrice [GAS_PRICE] \ 88 | --erc721Address "[ERC721_CONTRACT_ADDRESS]" \ 89 | --recipient "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 90 | --id 0x50 91 | ``` 92 | 93 | Finally, you will start the transfer 94 | 95 | ```bash 96 | # Start transfer from Polygon PoS to Polygon Edge chain 97 | $ cb-sol-cli erc721 deposit \ 98 | --url https://rpc-mumbai.matic.today \ 99 | --privateKey [PRIVATE_KEY] \ 100 | --gasPrice 
[GAS_PRICE] \ 101 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 102 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" \ 103 | --id 0x50 \ 104 | # ChainID of Polygon Edge chain 105 | --dest 100 \ 106 | --recipient "[RECIPIENT_ADDRESS_IN_POLYGON_EDGE_CHAIN]" 107 | ``` 108 | 109 | The relayer will get the event and vote for the proposal. It executes a transaction to send NFTs to the recipient account in the Polygon Edge chain after the required number of votes are submitted. 110 | 111 | ```bash 112 | INFO[11-19|09:07:50] Handling nonfungible deposit event chain=mumbai 113 | INFO[11-19|09:07:50] Attempting to resolve message chain=polygon-edge type=NonFungibleTransfer src=99 dst=100 nonce=2 rId=000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501 114 | INFO[11-19|09:07:50] Creating erc721 proposal chain=polygon-edge src=99 nonce=2 115 | INFO[11-19|09:07:50] Watching for finalization event chain=polygon-edge src=99 nonce=2 116 | INFO[11-19|09:07:50] Submitted proposal vote chain=polygon-edge tx=0x58a22d84a08269ad2e8d52d8dc038621f1a21109d11c7b6e0d32d5bf21ea8505 src=99 depositNonce=2 gasPrice=1 117 | INFO[11-19|09:08:15] Submitted proposal execution chain=polygon-edge tx=0x57419844881a07531e31667c609421662d94d21d0709e64fb728138309267e68 src=99 dst=100 nonce=2 gasPrice=3 118 | ``` 119 | 120 | You can check the owner of the NFT on the Polygon Edge network after the execution is completed. 
121 | 122 | ```bash 123 | # Check the owner of NFT 0x50 in Polygon Edge chain 124 | $ cb-sol-cli erc721 owner \ 125 | --url http://localhost:10002 \ 126 | --erc721Address "[ERC721_CONTRACT_ADDRESS]" \ 127 | --id 0x50 128 | ``` 129 | -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: setup 3 | title: Setup 4 | --- 5 | 6 | ## Contracts deployment 7 | 8 | In this section, you will deploy the required contracts to the Polygon PoS and Polygon Edge chain with `cb-sol-cli`. 9 | 10 | ```bash 11 | # Setup for cb-sol-cli command 12 | $ git clone https://github.com/ChainSafe/chainbridge-deploy.git 13 | $ cd chainbridge-deploy/cb-sol-cli 14 | $ make install 15 | ``` 16 | 17 | Firstly, we will deploy contracts to the Polygon PoS chain by `cb-sol-cli deploy` command. `--all` flag makes the command deploy all the contracts, including Bridge, ERC20 Handler, ERC721 Handler, Generic Handler, ERC20, and ERC721 contract. In addition, it'll set the default relayer account address and the threshold 18 | 19 | ```bash 20 | # Deploy all required contracts into Polygon PoS chain 21 | $ cb-sol-cli deploy --all --chainId 99 \ 22 | --url https://rpc-mumbai.matic.today \ 23 | --gasPrice [GAS_PRICE] \ 24 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 25 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 26 | --relayerThreshold 1 27 | ``` 28 | 29 | 30 | Learn about chainID and JSON-RPC URL [here](/docs/additional-features/chainbridge/definitions) 31 | 32 | :::caution 33 | 34 | The default gas price in `cb-sol-cli` is `20000000` (`0.02 Gwei`). To set the appropriate gas price in a transaction, please set the value using the `--gasPrice` argument. 
35 | 36 | ```bash 37 | $ cb-sol-cli deploy --all --chainId 99 \ 38 | --url https://rpc-mumbai.matic.today \ 39 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 40 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 41 | --relayerThreshold 1 \ 42 | # Set gas price to 5 Gwei 43 | --gasPrice 5000000000 44 | ``` 45 | 46 | ::: 47 | 48 | :::caution 49 | 50 | The Bridge contract takes approximately 0x3f97b8 (4167608) gas to deploy. Please make sure the blocks being generated have enough block gas limit to contain the contract creation transaction. To learn more about changing block gas limit in Polygon Edge, please visit 51 | the [Local Setup](/docs/get-started/set-up-ibft-locally) 52 | 53 | ::: 54 | 55 | Once the contracts have been deployed, you will get the following result: 56 | 57 | ```bash 58 | Deploying contracts... 59 | ✓ Bridge contract deployed 60 | ✓ ERC20Handler contract deployed 61 | ✓ ERC721Handler contract deployed 62 | ✓ GenericHandler contract deployed 63 | ✓ ERC20 contract deployed 64 | WARNING: Multiple definitions for safeTransferFrom 65 | ✓ ERC721 contract deployed 66 | 67 | ================================================================ 68 | Url: https://rpc-mumbai.matic.today 69 | Deployer: 70 | Gas Limit: 8000000 71 | Gas Price: 20000000 72 | Deploy Cost: 0.00029065308 73 | 74 | Options 75 | ======= 76 | Chain Id: 77 | Threshold: 78 | Relayers: 79 | Bridge Fee: 0 80 | Expiry: 100 81 | 82 | Contract Addresses 83 | ================================================================ 84 | Bridge: 85 | ---------------------------------------------------------------- 86 | Erc20 Handler: 87 | ---------------------------------------------------------------- 88 | Erc721 Handler: 89 | ---------------------------------------------------------------- 90 | Generic Handler: 91 | ---------------------------------------------------------------- 92 | Erc20: 93 | ---------------------------------------------------------------- 94 | Erc721: 95 | 
---------------------------------------------------------------- 96 | Centrifuge Asset: Not Deployed 97 | ---------------------------------------------------------------- 98 | WETC: Not Deployed 99 | ================================================================ 100 | ``` 101 | 102 | Now we may deploy the contracts to the Polygon Edge chain. 103 | 104 | ```bash 105 | # Deploy all required contracts into Polygon Edge chain 106 | $ cb-sol-cli deploy --all --chainId 100 \ 107 | --url http://localhost:10002 \ 108 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 109 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 110 | --relayerThreshold 1 111 | ``` 112 | 113 | Save the terminal outputs with the deployed smart contract addresses as we will need them for the next step. 114 | 115 | ## Relayer setup 116 | 117 | In this section, you will start a relayer to exchange data between 2 chains. 118 | 119 | First, we need to clone and build the ChainBridge repository. 120 | 121 | ```bash 122 | $ git clone https://github.com/ChainSafe/ChainBridge.git 123 | $ cd ChainBridge && make install 124 | ``` 125 | 126 | Next, you need to create `config.json` and set the JSON-RPC URLs, relayer address, and contracts address for each chain. 
127 | 128 | ```json 129 | { 130 | "chains": [ 131 | { 132 | "name": "mumbai", 133 | "type": "ethereum", 134 | "id": "99", 135 | "endpoint": "https://rpc-mumbai.matic.today", 136 | "from": "", 137 | "opts": { 138 | "bridge": "", 139 | "erc20Handler": "", 140 | "erc721Handler": "", 141 | "genericHandler": "", 142 | "minGasPrice": "1", 143 | "http": "true" 144 | } 145 | }, 146 | { 147 | "name": "polygon-edge", 148 | "type": "ethereum", 149 | "id": "100", 150 | "endpoint": "http://localhost:10002", 151 | "from": "", 152 | "opts": { 153 | "bridge": "", 154 | "erc20Handler": "", 155 | "erc721Handler": "", 156 | "genericHandler": "", 157 | "minGasPrice": "1", 158 | "http": "true" 159 | } 160 | } 161 | ] 162 | } 163 | ``` 164 | 165 | To start a relayer, you need to import the private key corresponding to the relayer account address. You will need to input the password when you import private key. Once the import has been successful, the key will be stored under `keys/
.key`. 166 | 167 | ```bash 168 | # Import private key and store to local with encryption 169 | $ chainbridge accounts import --privateKey [RELAYER_ACCOUNT_PRIVATE_KEY] 170 | 171 | INFO[11-19|07:09:01] Importing key... 172 | Enter password to encrypt keystore file: 173 | > [PASSWORD_TO_ENCRYPT_KEY] 174 | INFO[11-19|07:09:05] private key imported address= file=.../keys/.key 175 | ``` 176 | 177 | Then, you can start the relayer. You will need to input the same password you chose for storing the key in the beginning. 178 | 179 | ```bash 180 | # Start relayer 181 | $ chainbridge --config config.json --latest 182 | 183 | INFO[11-19|07:15:19] Starting ChainBridge... 184 | Enter password for key ./keys/.key: 185 | > [PASSWORD_TO_DECRYPT_KEY] 186 | INFO[11-19|07:15:25] Connecting to ethereum chain... chain=mumbai url= 187 | Enter password for key ./keys/.key: 188 | > [PASSWORD_TO_DECRYPT_KEY] 189 | INFO[11-19|07:15:31] Connecting to ethereum chain... chain=polygon-edge url= 190 | ``` 191 | 192 | Once the relayer has begun, it will start to watch new blocks on each chain. 193 | -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/use-case-erc20-bridge.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: use-case-erc20-bridge 3 | title: Use case - ERC20 Bridge 4 | description: Example for to bridge ERC20 contract 5 | keywords: 6 | - docs 7 | - polygon 8 | - edge 9 | - Bridge 10 | - ERC20 11 | --- 12 | 13 | This section aims to give you a setup flow of ERC20 Bridge for a practical use case. 14 | 15 | In this guide, you will use Mumbai Polygon PoS testnet and Polygon Edge local chain. Please make sure you have JSON-RPC endpoint for Mumbai and you've set up Polygon Edge in local environment. Please refer to [Local Setup](/docs/get-started/set-up-ibft-locally) or [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) for more details. 
16 | 17 | ## Scenario 18 | 19 | This scenario is to set up a Bridge for the ERC20 token that has been deployed in public chain (Polygon PoS) already in order to enable low-cost transfer in a private chain (Polygon Edge) for users in a regular case. In such a case, the total supply of token has been defined in the public chain and only the amount of the token which has been transferred from the public chain to the private chain must exist in the private chain. For that reason, you'll need to use lock/release mode in the public chain and burn/mint mode in the private chain. 20 | 21 | When sending tokens from the public chain to the private chain, the token will be locked in ERC20 Handler contract of the public chain and the same amount of token will be minted in the private chain. On the other hand, in case of transfer from the private chain to the public chain, the token in the private chain will be burned and the same amount of token will be released from ERC20 Handler contract in the public chain. 22 | 23 | ## Contracts 24 | 25 | This is explained using a simple ERC20 contract instead of the contract developed by ChainBridge. 
For burn/mint mode, ERC20 contract must have `mint` and `burnFrom` methods in addition to the methods for ERC20 like this: 26 | 27 | ```sol 28 | pragma solidity ^0.8.14; 29 | 30 | import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; 31 | import "@openzeppelin/contracts/access/AccessControl.sol"; 32 | 33 | contract SampleToken is ERC20, AccessControl { 34 | bytes32 public constant MINTER_ROLE = keccak256("MINTER_ROLE"); 35 | bytes32 public constant BURNER_ROLE = keccak256("BURNER_ROLE"); 36 | 37 | constructor(string memory name, string memory symbol) ERC20(name, symbol) { 38 | _setupRole(DEFAULT_ADMIN_ROLE, _msgSender()); 39 | _setupRole(MINTER_ROLE, _msgSender()); 40 | _setupRole(BURNER_ROLE, _msgSender()); 41 | } 42 | 43 | function mint(address recipient, uint256 amount) 44 | external 45 | onlyRole(MINTER_ROLE) 46 | { 47 | _mint(recipient, amount); 48 | } 49 | 50 | function burnFrom(address owner, uint256 amount) 51 | external 52 | onlyRole(BURNER_ROLE) 53 | { 54 | _burn(owner, amount); 55 | } 56 | } 57 | ``` 58 | 59 | All codes and scripts are in Github Repo [Trapesys/chainbridge-example](https://github.com/Trapesys/chainbridge-example). 60 | 61 | ## Step1: Deploy Bridge and ERC20 Handler contracts 62 | 63 | Firstly, you'll deploy Bridge and ERC20Handler contracts using `cb-sol-cli` in the both chains. 
64 | 65 | ```bash 66 | # Deploy Bridge and ERC20 contracts in Polygon PoS chain 67 | $ cb-sol-cli deploy --bridge --erc20Handler --chainId 99 \ 68 | --url https://rpc-mumbai.matic.today \ 69 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 70 | --gasPrice [GAS_PRICE] \ 71 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 72 | --relayerThreshold 1 73 | ``` 74 | 75 | ```bash 76 | # Deploy Bridge and ERC20 contracts in Polygon Edge chain 77 | $ cb-sol-cli deploy --bridge --erc20Handler --chainId 100 \ 78 | --url http://localhost:10002 \ 79 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 80 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 81 | --relayerThreshold 1 82 | ``` 83 | 84 | You'll get Bridge and ERC20Handler contract addresses like this: 85 | 86 | ```bash 87 | Deploying contracts... 88 | ✓ Bridge contract deployed 89 | ✓ ERC20Handler contract deployed 90 | 91 | ================================================================ 92 | Url: https://rpc-mumbai.matic.today 93 | Deployer: 94 | Gas Limit: 8000000 95 | Gas Price: 20000000 96 | Deploy Cost: 0.00029065308 97 | 98 | Options 99 | ======= 100 | Chain Id: 101 | Threshold: 102 | Relayers: 103 | Bridge Fee: 0 104 | Expiry: 100 105 | 106 | Contract Addresses 107 | ================================================================ 108 | Bridge: 109 | ---------------------------------------------------------------- 110 | Erc20 Handler: 111 | ---------------------------------------------------------------- 112 | Erc721 Handler: Not Deployed 113 | ---------------------------------------------------------------- 114 | Generic Handler: Not Deployed 115 | ---------------------------------------------------------------- 116 | Erc20: Not Deployed 117 | ---------------------------------------------------------------- 118 | Erc721: Not Deployed 119 | ---------------------------------------------------------------- 120 | Centrifuge Asset: Not Deployed 121 | ---------------------------------------------------------------- 122 | WETC: Not Deployed 123 
| ================================================================ 124 | ``` 125 | 126 | ## Step2: Deploy your ERC20 contract 127 | 128 | You'll deploy your ERC20 contract. This example guides you with hardhat project [Trapesys/chainbridge-example](https://github.com/Trapesys/chainbridge-example). 129 | 130 | ```bash 131 | $ git clone https://github.com/Trapesys/chainbridge-example.git 132 | $ cd chainbridge-example 133 | $ npm i 134 | ``` 135 | 136 | Please create `.env` file and set the following values. 137 | 138 | ```.env 139 | PRIVATE_KEYS=0x... 140 | MUMBAI_JSONRPC_URL=https://rpc-mumbai.matic.today 141 | EDGE_JSONRPC_URL=http://localhost:10002 142 | ``` 143 | 144 | Next you'll deploy ERC20 contract in the both chains. 145 | 146 | ```bash 147 | $ npx hardhat deploy --contract erc20 --name --symbol --network mumbai 148 | ``` 149 | 150 | ```bash 151 | $ npx hardhat deploy --contract erc20 --name --symbol --network edge 152 | ``` 153 | 154 | After deployment is successful, you'll get a contract address like this: 155 | 156 | ```bash 157 | ERC20 contract has been deployed 158 | Address: 159 | Name: 160 | Symbol: 161 | ``` 162 | 163 | ## Step3: Register resource ID in Bridge 164 | 165 | You will register a resource ID that associates resource in a cross-chain environment. You need to set the same resource ID in the both chain. 
166 | 167 | ```bash 168 | $ cb-sol-cli bridge register-resource \ 169 | --url https://rpc-mumbai.matic.today \ 170 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 171 | --gasPrice [GAS_PRICE] \ 172 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" \ 173 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 174 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 175 | --targetContract "[ERC20_CONTRACT_ADDRESS]" 176 | ``` 177 | 178 | ```bash 179 | $ cb-sol-cli bridge register-resource \ 180 | --url http://localhost:10002 \ 181 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 182 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" \ 183 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 184 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 185 | --targetContract "[ERC20_CONTRACT_ADDRESS]" 186 | ``` 187 | 188 | ## Step4: Set Mint/Burn mode in ERC20 bridge of the Edge 189 | 190 | Bridge expects to work as burn/mint mode in Polygon Edge. You'll set burn/mint mode using `cb-sol-cli`. 191 | 192 | ```bash 193 | $ cb-sol-cli bridge set-burn \ 194 | --url http://localhost:10002 \ 195 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 196 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 197 | --handler "[ERC20_HANDLER_CONTRACT_ADDRESS]" \ 198 | --tokenContract "[ERC20_CONTRACT_ADDRESS]" 199 | ``` 200 | 201 | And you need to grant a minter and burner role to the ERC20 Handler contract. 202 | 203 | ```bash 204 | $ npx hardhat grant --role mint --contract [ERC20_CONTRACT_ADDRESS] --address [ERC20_HANDLER_CONTRACT_ADDRESS] --network edge 205 | $ npx hardhat grant --role burn --contract [ERC20_CONTRACT_ADDRESS] --address [ERC20_HANDLER_CONTRACT_ADDRESS] --network edge 206 | ``` 207 | 208 | ## Step5: Mint Token 209 | 210 | You'll mint new ERC20 tokens in Mumbai chain. 
211 | 212 | ```bash 213 | $ npx hardhat mint --type erc20 --contract [ERC20_CONTRACT_ADDRESS] --address [ACCOUNT_ADDRESS] --amount 100000000000000000000 --network mumbai # 100 Token 214 | ``` 215 | 216 | After the transaction is successful, the account will have the minted token. 217 | 218 | ## Step6: Start ERC20 transfer 219 | 220 | Before starting this step, please make sure that you've started a relayer. Please check [Setup](/docs/additional-features/chainbridge/setup) for more details. 221 | 222 | During token transfer from Mumbai to Edge, ERC20 Handler contract in Mumbai withdraws tokens from your account. You'll call approve before transfer. 223 | 224 | ```bash 225 | $ npx hardhat approve --type erc20 --contract [ERC20_CONTRACT_ADDRESS] --address [ERC20_HANDLER_CONTRACT_ADDRESS] --amount 10000000000000000000 --network mumbai # 10 Token 226 | ``` 227 | 228 | Finally, you'll start token transfer from Mumbai to Edge using `cb-sol-cli`. 229 | 230 | ```bash 231 | # Start transfer from Mumbai to Polygon Edge chain 232 | $ cb-sol-cli erc20 deposit \ 233 | --url https://rpc-mumbai.matic.today \ 234 | --privateKey [PRIVATE_KEY] \ 235 | --gasPrice [GAS_PRICE] \ 236 | --amount 10 \ 237 | # ChainID of Polygon Edge chain 238 | --dest 100 \ 239 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 240 | --recipient "[RECIPIENT_ADDRESS_IN_POLYGON_EDGE_CHAIN]" \ 241 | --resourceId "0x000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00" 242 | ``` 243 | 244 | After the deposit transaction is successful, the relayer will get the event and vote for the proposal. It executes a transaction to send tokens to the recipient account in the Polygon Edge chain after the required number of votes are submitted. 
245 | 246 | ```bash 247 | INFO[11-19|08:15:58] Handling fungible deposit event chain=mumbai dest=100 nonce=1 248 | INFO[11-19|08:15:59] Attempting to resolve message chain=polygon-edge type=FungibleTransfer src=99 dst=100 nonce=1 rId=000000000000000000000000000000c76ebe4a02bbc34786d860b355f5a5ce00 249 | INFO[11-19|08:15:59] Creating erc20 proposal chain=polygon-edge src=99 nonce=1 250 | INFO[11-19|08:15:59] Watching for finalization event chain=polygon-edge src=99 nonce=1 251 | INFO[11-19|08:15:59] Submitted proposal vote chain=polygon-edge tx=0x67a97849951cdf0480e24a95f59adc65ae75da23d00b4ab22e917a2ad2fa940d src=99 depositNonce=1 gasPrice=1 252 | INFO[11-19|08:16:24] Submitted proposal execution chain=polygon-edge tx=0x63615a775a55fcb00676a40e3c9025eeefec94d0c32ee14548891b71f8d1aad1 src=99 dst=100 nonce=1 gasPrice=5 253 | ``` 254 | 255 | Once the execution transaction is successful, you will get tokens in the Polygon Edge chain. 256 | -------------------------------------------------------------------------------- /docs/additional-features/chainbridge/use-case-erc721-bridge.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: use-case-erc721-bridge 3 | title: Use case - ERC721 Bridge 4 | description: Example of how to bridge an ERC721 contract 5 | keywords: 6 | - docs 7 | - polygon 8 | - edge 9 | - Bridge 10 | - ERC721 11 | --- 12 | 13 | This section aims to give you a setup flow of ERC721 Bridge for a practical use case. 14 | 15 | In this guide, you will use Mumbai Polygon PoS testnet and Polygon Edge local chain. Please make sure you have JSON-RPC endpoint for Mumbai and you've set up Polygon Edge in local environment. Please refer to [Local Setup](/docs/get-started/set-up-ibft-locally) or [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) for more details. 
16 | 17 | ## Scenario 18 | 19 | This scenario is to set up a Bridge for the ERC721 NFT that has been deployed in public chain (Polygon PoS) already in order to enable low-cost transfer in a private chain (Polygon Edge) for users in a regular case. In such a case, the original metadata has been defined in the public chain and only the NFTs that have been transferred from the public chain can exist in the private chain. For that reason, you'll need to use lock/release mode in the public chain and burn/mint mode in the private chain. 20 | 21 | When sending NFTs from the public chain to the private chain, the NFT will be locked in ERC721 Handler contract in the public chain and the same NFT will be minted in the private chain. On the other hand, in case of transfer from the private chain to the public chain, the NFT in the private chain will be burned and the same NFT will be released from ERC721 Handler contract in the public chain. 22 | 23 | ## Contracts 24 | 25 | This is explained using a simple ERC721 contract instead of the contract developed by ChainBridge. 
For burn/mint mode, ERC721 contract must have `mint` and `burn` methods in addition to the methods defined in ERC721 like this: 26 | 27 | ```sol 28 | pragma solidity ^0.8.14; 29 | 30 | import "@openzeppelin/contracts/token/ERC721/ERC721.sol"; 31 | import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; 32 | import "@openzeppelin/contracts/access/AccessControl.sol"; 33 | import "@openzeppelin/contracts/token/ERC721/extensions/ERC721Burnable.sol"; 34 | import "@openzeppelin/contracts/token/ERC721/extensions/ERC721URIStorage.sol"; 35 | 36 | contract SampleNFT is ERC721, ERC721Burnable, ERC721URIStorage, AccessControl { 37 | bytes32 public constant MINTER_ROLE = keccak256("MINTER_ROLE"); 38 | bytes32 public constant BURNER_ROLE = keccak256("BURNER_ROLE"); 39 | 40 | string public baseURI; 41 | 42 | constructor( 43 | string memory name, 44 | string memory symbol, 45 | string memory baseURI 46 | ) ERC721(name, symbol) { 47 | _setupRole(DEFAULT_ADMIN_ROLE, _msgSender()); 48 | _setupRole(MINTER_ROLE, _msgSender()); 49 | _setupRole(BURNER_ROLE, _msgSender()); 50 | 51 | _setBaseURI(baseURI); 52 | } 53 | 54 | function mint( 55 | address recipient, 56 | uint256 tokenID, 57 | string memory data 58 | ) public onlyRole(MINTER_ROLE) { 59 | _mint(recipient, tokenID); 60 | _setTokenURI(tokenID, data); 61 | } 62 | 63 | function burn(uint256 tokenID) 64 | public 65 | override(ERC721Burnable) 66 | onlyRole(BURNER_ROLE) 67 | { 68 | _burn(tokenID); 69 | } 70 | 71 | function tokenURI(uint256 tokenId) 72 | public 73 | view 74 | virtual 75 | override(ERC721, ERC721URIStorage) 76 | returns (string memory) 77 | { 78 | return super.tokenURI(tokenId); 79 | } 80 | 81 | function supportsInterface(bytes4 interfaceId) 82 | public 83 | view 84 | override(ERC721, AccessControl) 85 | returns (bool) 86 | { 87 | return super.supportsInterface(interfaceId); 88 | } 89 | 90 | function _burn(uint256 tokenId) 91 | internal 92 | virtual 93 | override(ERC721, ERC721URIStorage) 94 | { 95 | super._burn(tokenId); 
96 | } 97 | 98 | function _setBaseURI(string memory baseURI_) internal { 99 | baseURI = baseURI_; 100 | } 101 | 102 | function _baseURI() internal view virtual override returns (string memory) { 103 | return baseURI; 104 | } 105 | } 106 | ``` 107 | 108 | All codes and scripts are in Github Repo [Trapesys/chainbridge-example](https://github.com/Trapesys/chainbridge-example). 109 | 110 | ## Step1: Deploy Bridge and ERC721 Handler contracts 111 | 112 | Firstly, you'll deploy Bridge and ERC721Handler contracts using `cb-sol-cli` in the both chains. 113 | 114 | ```bash 115 | # Deploy Bridge and ERC721 contracts in Polygon PoS chain 116 | $ cb-sol-cli deploy --bridge --erc721Handler --chainId 99 \ 117 | --url https://rpc-mumbai.matic.today \ 118 | --gasPrice [GAS_PRICE] \ 119 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 120 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 121 | --relayerThreshold 1 122 | ``` 123 | 124 | ```bash 125 | # Deploy Bridge and ERC721 contracts in Polygon Edge chain 126 | $ cb-sol-cli deploy --bridge --erc721Handler --chainId 100 \ 127 | --url http://localhost:10002 \ 128 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 129 | --relayers [RELAYER_ACCOUNT_ADDRESS] \ 130 | --relayerThreshold 1 131 | ``` 132 | 133 | You'll get Bridge and ERC721Handler contract addresses like this: 134 | 135 | ```bash 136 | Deploying contracts... 
137 | ✓ Bridge contract deployed 138 | ✓ ERC721Handler contract deployed 139 | 140 | ================================================================ 141 | Url: https://rpc-mumbai.matic.today 142 | Deployer: 143 | Gas Limit: 8000000 144 | Gas Price: 20000000 145 | Deploy Cost: 0.00029065308 146 | 147 | Options 148 | ======= 149 | Chain Id: 150 | Threshold: 151 | Relayers: 152 | Bridge Fee: 0 153 | Expiry: 100 154 | 155 | Contract Addresses 156 | ================================================================ 157 | Bridge: 158 | ---------------------------------------------------------------- 159 | Erc20 Handler: Not Deployed 160 | ---------------------------------------------------------------- 161 | Erc721 Handler: 162 | ---------------------------------------------------------------- 163 | Generic Handler: Not Deployed 164 | ---------------------------------------------------------------- 165 | Erc20: Not Deployed 166 | ---------------------------------------------------------------- 167 | Erc721: Not Deployed 168 | ---------------------------------------------------------------- 169 | Centrifuge Asset: Not Deployed 170 | ---------------------------------------------------------------- 171 | WETC: Not Deployed 172 | ================================================================ 173 | ``` 174 | 175 | ## Step 2: Deploy your ERC721 contract 176 | 177 | You'll deploy your ERC721 contract. This example uses the Hardhat project [Trapesys/chainbridge-example](https://github.com/Trapesys/chainbridge-example). 178 | 179 | ```bash 180 | $ git clone https://github.com/Trapesys/chainbridge-example.git 181 | $ cd chainbridge-example 182 | $ npm i 183 | ``` 184 | 185 | Please create a `.env` file and set the following values. 186 | 187 | ```.env 188 | PRIVATE_KEYS=0x... 189 | MUMBAI_JSONRPC_URL=https://rpc-mumbai.matic.today 190 | EDGE_JSONRPC_URL=http://localhost:10002 191 | ``` 192 | 193 | Next, you'll deploy the ERC721 contract on both chains. 
194 | 195 | ```bash 196 | $ npx hardhat deploy --contract erc721 --name --symbol --uri --network mumbai 197 | ``` 198 | 199 | ```bash 200 | $ npx hardhat deploy --contract erc721 --name --symbol --uri --network edge 201 | ``` 202 | 203 | After the deployment is successful, you'll get the contract address like this: 204 | 205 | ```bash 206 | ERC721 contract has been deployed 207 | Address: 208 | Name: 209 | Symbol: 210 | Base URI: 211 | ``` 212 | 213 | ## Step 3: Register resource ID in Bridge 214 | 215 | You will register a resource ID that associates resources in a cross-chain environment. 216 | 217 | ```bash 218 | $ cb-sol-cli bridge register-resource \ 219 | --url https://rpc-mumbai.matic.today \ 220 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 221 | --gasPrice [GAS_PRICE] \ 222 | # Set Resource ID for ERC721 223 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" \ 224 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 225 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 226 | --targetContract "[ERC721_CONTRACT_ADDRESS]" 227 | ``` 228 | 229 | ```bash 230 | $ cb-sol-cli bridge register-resource \ 231 | --url http://localhost:10002 \ 232 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 233 | # Set Resource ID for ERC721 234 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" \ 235 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 236 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 237 | --targetContract "[ERC721_CONTRACT_ADDRESS]" 238 | ``` 239 | 240 | ## Step 4: Set Mint/Burn mode in ERC721 bridge of the Edge 241 | 242 | The bridge is expected to work in burn/mint mode on the Edge chain. You'll enable burn/mint mode. 
243 | 244 | ```bash 245 | $ cb-sol-cli bridge set-burn \ 246 | --url http://localhost:10002 \ 247 | --privateKey [ADMIN_ACCOUNT_PRIVATE_KEY] \ 248 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 249 | --handler "[ERC721_HANDLER_CONTRACT_ADDRESS]" \ 250 | --tokenContract "[ERC721_CONTRACT_ADDRESS]" 251 | ``` 252 | 253 | And you need to grant the minter and burner roles to the ERC721 Handler contract. 254 | 255 | ```bash 256 | $ npx hardhat grant --role mint --contract [ERC721_CONTRACT_ADDRESS] --address [ERC721_HANDLER_CONTRACT_ADDRESS] --network edge 257 | $ npx hardhat grant --role burn --contract [ERC721_CONTRACT_ADDRESS] --address [ERC721_HANDLER_CONTRACT_ADDRESS] --network edge 258 | ``` 259 | 260 | ## Step 5: Mint NFT 261 | 262 | You'll mint a new ERC721 NFT on the Mumbai chain. 263 | 264 | ```bash 265 | $ npx hardhat mint --type erc721 --contract [ERC721_CONTRACT_ADDRESS] --address [ACCOUNT_ADDRESS] --id 0x50 --data hello.json --network mumbai 266 | ``` 267 | 268 | After the transaction is successful, the account will have the minted NFT. 269 | 270 | ## Step 6: Start ERC721 transfer 271 | 272 | Before starting this step, please make sure that you've started the relayer. Please check [Setup](/docs/additional-features/chainbridge/setup) for more details. 273 | 274 | During the NFT transfer from Mumbai to Edge, the ERC721 Handler contract in Mumbai withdraws the NFT from your account. You'll call approve before the transfer. 275 | 276 | ```bash 277 | $ npx hardhat approve --type erc721 --contract [ERC721_CONTRACT_ADDRESS] --address [ERC721_HANDLER_CONTRACT_ADDRESS] --id 0x50 --network mumbai 278 | ``` 279 | 280 | Finally, you'll start the NFT transfer from Mumbai to Edge. 
281 | 282 | ```bash 283 | # Start transfer from Mumbai to Polygon Edge chain 284 | $ cb-sol-cli erc721 deposit \ 285 | --url https://rpc-mumbai.matic.today \ 286 | --privateKey [PRIVATE_KEY] \ 287 | --gasPrice [GAS_PRICE] \ 288 | --id 0x50 \ 289 | # ChainID for Polygon Edge chain 290 | --dest 100 \ 291 | --bridge "[BRIDGE_CONTRACT_ADDRESS]" \ 292 | --recipient "[RECIPIENT_ADDRESS_IN_POLYGON_EDGE_CHAIN]" \ 293 | --resourceId "0x000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501" 294 | ``` 295 | 296 | After the deposit transaction is successful, the relayer will get the event and vote for the proposal. 297 | It executes a transaction to send NFT to the recipient account in Polygon Edge chain after the required number of votes are submitted. 298 | 299 | ```bash 300 | INFO[11-19|09:07:50] Handling nonfungible deposit event chain=mumbai 301 | INFO[11-19|09:07:50] Attempting to resolve message chain=polygon-edge type=NonFungibleTransfer src=99 dst=100 nonce=2 rId=000000000000000000000000000000e389d61c11e5fe32ec1735b3cd38c69501 302 | INFO[11-19|09:07:50] Creating erc721 proposal chain=polygon-edge src=99 nonce=2 303 | INFO[11-19|09:07:50] Watching for finalization event chain=polygon-edge src=99 nonce=2 304 | INFO[11-19|09:07:50] Submitted proposal vote chain=polygon-edge tx=0x58a22d84a08269ad2e8d52d8dc038621f1a21109d11c7b6e0d32d5bf21ea8505 src=99 depositNonce=2 gasPrice=1 305 | INFO[11-19|09:08:15] Submitted proposal execution chain=polygon-edge tx=0x57419844881a07531e31667c609421662d94d21d0709e64fb728138309267e68 src=99 dst=100 nonce=2 gasPrice=3 306 | ``` 307 | 308 | Once the execution transaction is successful, you will get NFT in Polygon Edge chain. 
309 | -------------------------------------------------------------------------------- /docs/additional-features/stress-testing.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: stress-testing 3 | title: Network stress testing 4 | --- 5 | 6 | ## Prerequisites 7 | 8 | This guide assumes that: 9 | 10 | - You have a working Polygon Edge network up and running 11 | - Both your JSON-RPC and GRPC endpoints are reachable 12 | 13 | ## Overview 14 | 15 | The Polygon Edge Loadbot is a helper utility that is meant to stress test a Polygon Edge network. 16 | 17 | Currently, it supports 2 modes: 18 | 19 | - `transfer` - mode that does stress testing using fund-transfer transactions. **[Default]**. 20 | - `deploy` - mode that deploys specified smart contracts with each transaction. 21 | 22 | ### Transfer Mode 23 | 24 | The transfer mode assumes that there is a sender account that has initial funds to conduct the loadbot run. 25 | 26 | The sender's account address and private key need to be set in the environment variables: 27 | 28 | ```bash 29 | # Example 30 | export LOADBOT_0x9A2E59d06899a383ef47C1Ec265317986D026055=154c4bc0cca942d8a0b49ece04d95c872d8f53d34b8f2ac76253a3700e4f1151 31 | ``` 32 | 33 | ### Deploy Mode 34 | 35 | The deploy mode conducts contract deployment with each new transaction in the loadbot run. 36 | The contract being deployed can be specified using [specific flags](/docs/get-started/cli-commands#loadbot-flags), or if the contract path is omitted, a default 37 | `Greeter.sol` [contract](https://github.com/nomiclabs/hardhat/blob/master/packages/hardhat-core/sample-projects/basic/contracts/Greeter.sol) is used instead. 38 | 39 | ### Terminology 40 | 41 | This section covers some basic terminology regarding the loadbot configuration. 
42 | 43 | - **count** - The number of transactions to be submitted in the specified mode 44 | - **tps** - The number of transactions that should be submitted to the node per second 45 | 46 | ## Start the loadbot 47 | 48 | As an example, here is a valid command you can use to run the loadbot using two premined accounts: 49 | ```bash 50 | polygon-edge loadbot --jsonrpc http://127.0.0.1:10002 --grpc-address 127.0.0.1:10000 --sender 0x9A2E59d06899a383ef47C1Ec265317986D026055 --count 2000 --value 0x100 --tps 100 51 | ``` 52 | 53 | You should get a result similar to this on your terminal : 54 | ```bash 55 | =====[LOADBOT RUN]===== 56 | 57 | [COUNT DATA] 58 | Transactions submitted = 2000 59 | Transactions failed = 0 60 | 61 | [TURN AROUND DATA] 62 | Average transaction turn around = 3.490800s 63 | Fastest transaction turn around = 2.002320s 64 | Slowest transaction turn around = 5.006770s 65 | Total loadbot execution time = 24.009350s 66 | 67 | [BLOCK DATA] 68 | Blocks required = 11 69 | 70 | Block #223 = 120 txns 71 | Block #224 = 203 txns 72 | Block #225 = 203 txns 73 | Block #226 = 202 txns 74 | Block #227 = 201 txns 75 | Block #228 = 199 txns 76 | Block #229 = 200 txns 77 | Block #230 = 199 txns 78 | Block #231 = 201 txns 79 | Block #232 = 200 txns 80 | Block #233 = 72 txns 81 | ``` -------------------------------------------------------------------------------- /docs/architecture/modules/blockchain.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: blockchain 3 | title: Blockchain 4 | --- 5 | 6 | ## Overview 7 | 8 | One of the main modules of the Polygon Edge are **Blockchain** and **State**.
9 | 10 | **Blockchain** is the powerhouse that deals with block reorganizations. This means that it deals with all the logic that happens when a new block is included in the blockchain. 11 | 12 | **State** represents the *state transition* object. It deals with how the state changes when a new block is included.
Among other things, **State** handles: 13 | * Executing transactions 14 | * Executing the EVM 15 | * Changing the Merkle tries 16 | * Much more, which is covered in the corresponding **State** section 🙂 17 | 18 | The key takeaway is that these 2 parts are very connected, and they work closely together in order for the client to function.
For example, when the **Blockchain** layer receives a new block (and no reorganization occurred), it calls the **State** to perform a state transition. 19 | 20 | **Blockchain** also has to deal with some parts relating to consensus (ex. *is this ethHash correct?*, *is this PoW correct?*).
In one sentence, **it is the main core of logic through which all blocks are included**. 21 | 22 | ## *WriteBlocks* 23 | 24 | One of the most important parts relating to the **Blockchain** layer is the *WriteBlocks* method: 25 | 26 | ````go title="blockchain/blockchain.go" 27 | // WriteBlocks writes a batch of blocks 28 | func (b *Blockchain) WriteBlocks(blocks []*types.Block) error { 29 | if len(blocks) == 0 { 30 | return fmt.Errorf("no headers found to insert") 31 | } 32 | 33 | parent, ok := b.readHeader(blocks[0].ParentHash()) 34 | if !ok { 35 | return fmt.Errorf("parent of %s (%d) not found: %s", blocks[0].Hash().String(), blocks[0].Number(), blocks[0].ParentHash()) 36 | } 37 | 38 | // validate chain 39 | for i := 0; i < len(blocks); i++ { 40 | block := blocks[i] 41 | 42 | if block.Number()-1 != parent.Number { 43 | return fmt.Errorf("number sequence not correct at %d, %d and %d", i, block.Number(), parent.Number) 44 | } 45 | if block.ParentHash() != parent.Hash { 46 | return fmt.Errorf("parent hash not correct") 47 | } 48 | if err := b.consensus.VerifyHeader(parent, block.Header, false, true); err != nil { 49 | return fmt.Errorf("failed to verify the header: %v", err) 50 | } 51 | 52 | // verify body data 53 | if hash := buildroot.CalculateUncleRoot(block.Uncles); hash != block.Header.Sha3Uncles { 54 | return fmt.Errorf("uncle root hash mismatch: have %s, want %s", hash, block.Header.Sha3Uncles) 55 | } 56 | 57 | if hash := buildroot.CalculateTransactionsRoot(block.Transactions); hash != block.Header.TxRoot { 58 | return fmt.Errorf("transaction root hash mismatch: have %s, want %s", hash, block.Header.TxRoot) 59 | } 60 | parent = block.Header 61 | } 62 | 63 | // Write chain 64 | for indx, block := range blocks { 65 | header := block.Header 66 | 67 | body := block.Body() 68 | if err := b.db.WriteBody(header.Hash, block.Body()); err != nil { 69 | return err 70 | } 71 | b.bodiesCache.Add(header.Hash, body) 72 | 73 | // Verify uncles. 
It requires to have the bodies on memory 74 | if err := b.VerifyUncles(block); err != nil { 75 | return err 76 | } 77 | // Process and validate the block 78 | if err := b.processBlock(blocks[indx]); err != nil { 79 | return err 80 | } 81 | 82 | // Write the header to the chain 83 | evnt := &Event{} 84 | if err := b.writeHeaderImpl(evnt, header); err != nil { 85 | return err 86 | } 87 | b.dispatchEvent(evnt) 88 | 89 | // Update the average gas price 90 | b.UpdateGasPriceAvg(new(big.Int).SetUint64(header.GasUsed)) 91 | } 92 | 93 | return nil 94 | } 95 | ```` 96 | The *WriteBlocks* method is the entry point to write blocks into the blockchain. As a parameter, it takes in a range of blocks.
97 | Firstly, the blocks are validated. After that, they are written to the chain. 98 | 99 | The actual *state transition* is performed by calling the *processBlock* method within *WriteBlocks*. 100 | 101 | It is worth mentioning that, because it is the entry point for writing blocks to the blockchain, other modules (such as the **Sealer**) utilize this method. 102 | 103 | ## Blockchain Subscriptions 104 | 105 | There needs to be a way to monitor blockchain-related changes.
106 | This is where **Subscriptions** come in. 107 | 108 | Subscriptions are a way to tap into blockchain event streams and instantly receive meaningful data. 109 | 110 | ````go title="blockchain/subscription.go" 111 | type Subscription interface { 112 | // Returns a Blockchain Event channel 113 | GetEventCh() chan *Event 114 | 115 | // Returns the latest event (blocking) 116 | GetEvent() *Event 117 | 118 | // Closes the subscription 119 | Close() 120 | } 121 | ```` 122 | 123 | The **Blockchain Events** contain information regarding any changes made to the actual chain. This includes reorganizations, as well as new blocks: 124 | 125 | ````go title="blockchain/subscription.go" 126 | type Event struct { 127 | // Old chain removed if there was a reorg 128 | OldChain []*types.Header 129 | 130 | // New part of the chain (or a fork) 131 | NewChain []*types.Header 132 | 133 | // Difficulty is the new difficulty created with this event 134 | Difficulty *big.Int 135 | 136 | // Type is the type of event 137 | Type EventType 138 | 139 | // Source is the source that generated the blocks for the event 140 | // right now it can be either the Sealer or the Syncer. TODO 141 | Source string 142 | } 143 | ```` 144 | 145 | :::tip Refresher 146 | Do you remember when we mentioned the ***monitor*** command in the [CLI Commands](/docs/get-started/cli-commands)? 147 | 148 | The Blockchain Events are the original events that happen in Polygon Edge, and they're later mapped to a Protocol Buffers message format for easy transfer. 149 | ::: -------------------------------------------------------------------------------- /docs/architecture/modules/json-rpc.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: json-rpc 3 | title: JSON RPC 4 | --- 5 | 6 | ## Overview 7 | 8 | The **JSON RPC** module implements the **JSON RPC API layer**, something that dApp developers use to interact with the 9 | blockchain. 
10 | 11 | It includes support for standard **[json-rpc endpoints](https://eth.wiki/json-rpc/API)**, as well as websocket 12 | endpoints. 13 | 14 | ## Blockchain Interface 15 | 16 | The Polygon Edge uses the ***blockchain interface*** to define all the methods that the JSON RPC module needs to use, in 17 | order to deliver its endpoints. 18 | 19 | The blockchain interface is implemented by the **[Minimal](/docs/architecture/modules/minimal)** server. It is the base implementation 20 | that's passed into the JSON RPC layer. 21 | 22 | ````go title="jsonrpc/blockchain.go" 23 | type blockchainInterface interface { 24 | // Header returns the current header of the chain (genesis if empty) 25 | Header() *types.Header 26 | 27 | // GetReceiptsByHash returns the receipts for a hash 28 | GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) 29 | 30 | // Subscribe subscribes for chain head events 31 | SubscribeEvents() blockchain.Subscription 32 | 33 | // GetHeaderByNumber returns the header by number 34 | GetHeaderByNumber(block uint64) (*types.Header, bool) 35 | 36 | // GetAvgGasPrice returns the average gas price 37 | GetAvgGasPrice() *big.Int 38 | 39 | // AddTx adds a new transaction to the tx pool 40 | AddTx(tx *types.Transaction) error 41 | 42 | // State returns a reference to the state 43 | State() state.State 44 | 45 | // BeginTxn starts a transition object 46 | BeginTxn(parentRoot types.Hash, header *types.Header) (*state.Transition, error) 47 | 48 | // GetBlockByHash gets a block using the provided hash 49 | GetBlockByHash(hash types.Hash, full bool) (*types.Block, bool) 50 | 51 | // ApplyTxn applies a transaction object to the blockchain 52 | ApplyTxn(header *types.Header, txn *types.Transaction) ([]byte, bool, error) 53 | 54 | stateHelperInterface 55 | } 56 | ```` 57 | 58 | ## ETH Endpoints 59 | 60 | All the standard JSON RPC endpoints are implemented in: 61 | 62 | ````bash 63 | jsonrpc/eth_endpoint.go 64 | ```` 65 | 66 | ## Filter Manager 67 | 68 | The 
**Filter Manager** is a service that runs alongside the JSON RPC server. 69 | 70 | It provides support for filtering blocks on the blockchain.
71 | Specifically, it includes both a **log** and a **block** level filter. 72 | 73 | The Filter Manager relies heavily on Subscription Events, mentioned in 74 | the [Blockchain](blockchain#blockchain-subscriptions) section 75 | 76 | ````go title="jsonrpc/filter_manager.go" 77 | type Filter struct { 78 | id string 79 | 80 | // block filter 81 | block *headElem 82 | 83 | // log cache 84 | logs []*Log 85 | 86 | // log filter 87 | logFilter *LogFilter 88 | 89 | // index of the filter in the timer array 90 | index int 91 | 92 | // next time to timeout 93 | timestamp time.Time 94 | 95 | // websocket connection 96 | ws wsConn 97 | } 98 | 99 | 100 | type FilterManager struct { 101 | logger hclog.Logger 102 | 103 | store blockchainInterface 104 | closeCh chan struct{} 105 | 106 | subscription blockchain.Subscription 107 | 108 | filters map[string]*Filter 109 | lock sync.Mutex 110 | 111 | updateCh chan struct{} 112 | timer timeHeapImpl 113 | timeout time.Duration 114 | 115 | blockStream *blockStream 116 | } 117 | 118 | ```` 119 | 120 | Filter Manager events get dispatched in the *Run* method: 121 | 122 | ````go title="jsonrpc/filter_manager.go" 123 | func (f *FilterManager) Run() { 124 | 125 | // watch for new events in the blockchain 126 | watchCh := make(chan *blockchain.Event) 127 | go func() { 128 | for { 129 | evnt := f.subscription.GetEvent() 130 | if evnt == nil { 131 | return 132 | } 133 | watchCh <- evnt 134 | } 135 | }() 136 | 137 | var timeoutCh <-chan time.Time 138 | for { 139 | // check for the next filter to be removed 140 | filter := f.nextTimeoutFilter() 141 | if filter == nil { 142 | timeoutCh = nil 143 | } else { 144 | timeoutCh = time.After(filter.timestamp.Sub(time.Now())) 145 | } 146 | 147 | select { 148 | case evnt := <-watchCh: 149 | // new blockchain event 150 | if err := f.dispatchEvent(evnt); err != nil { 151 | f.logger.Error("failed to dispatch event", "err", err) 152 | } 153 | 154 | case <-timeoutCh: 155 | // timeout for filter 156 | if 
!f.Uninstall(filter.id) { 157 | f.logger.Error("failed to uninstall filter", "id", filter.id) 158 | } 159 | 160 | case <-f.updateCh: 161 | // there is a new filter, reset the loop to start the timeout timer 162 | 163 | case <-f.closeCh: 164 | // stop the filter manager 165 | return 166 | } 167 | } 168 | } 169 | ```` 170 | 171 | ## 📜 Resources 172 | * **[Ethereum JSON-RPC](https://eth.wiki/json-rpc/API)** 173 | -------------------------------------------------------------------------------- /docs/architecture/modules/minimal.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: minimal 3 | title: Minimal 4 | --- 5 | 6 | ## Overview 7 | 8 | As mentioned before, Polygon Edge is a set of different modules, all connected to each other.
9 | The **Blockchain** is connected to the **State**, or for example, **Synchronization**, which pipes new blocks into the **Blockchain**. 10 | 11 | **Minimal** is the cornerstone for these inter-connected modules.
12 | It acts as a central hub for all the services that run on the Polygon Edge. 13 | 14 | ## Startup Magic 15 | 16 | Among other things, Minimal is responsible for: 17 | * Setting up data directories 18 | * Creating a keystore for libp2p communication 19 | * Creating storage 20 | * Setting up consensus 21 | * Setting up the blockchain object with GRPC, JSON RPC, and Synchronization 22 | 23 | ````go title="minimal/server.go" 24 | func NewServer(logger hclog.Logger, config *Config) (*Server, error) { 25 | m := &Server{ 26 | logger: logger, 27 | config: config, 28 | chain: config.Chain, 29 | grpcServer: grpc.NewServer(), 30 | } 31 | 32 | m.logger.Info("Data dir", "path", config.DataDir) 33 | 34 | // Generate all the paths in the dataDir 35 | if err := setupDataDir(config.DataDir, dirPaths); err != nil { 36 | return nil, fmt.Errorf("failed to create data directories: %v", err) 37 | } 38 | 39 | // Get the private key for the node 40 | keystore := keystore.NewLocalKeystore(filepath.Join(config.DataDir, "keystore")) 41 | key, err := keystore.Get() 42 | if err != nil { 43 | return nil, fmt.Errorf("failed to read private key: %v", err) 44 | } 45 | m.key = key 46 | 47 | storage, err := leveldb.NewLevelDBStorage(filepath.Join(config.DataDir, "blockchain"), logger) 48 | if err != nil { 49 | return nil, err 50 | } 51 | m.storage = storage 52 | 53 | // Setup consensus 54 | if err := m.setupConsensus(); err != nil { 55 | return nil, err 56 | } 57 | 58 | stateStorage, err := itrie.NewLevelDBStorage(filepath.Join(m.config.DataDir, "trie"), logger) 59 | if err != nil { 60 | return nil, err 61 | } 62 | 63 | st := itrie.NewState(stateStorage) 64 | m.state = st 65 | 66 | executor := state.NewExecutor(config.Chain.Params, st) 67 | executor.SetRuntime(precompiled.NewPrecompiled()) 68 | executor.SetRuntime(evm.NewEVM()) 69 | 70 | // Blockchain object 71 | m.blockchain, err = blockchain.NewBlockchain(logger, storage, config.Chain, m.consensus, executor) 72 | if err != nil { 73 | return 
nil, err 74 | } 75 | 76 | executor.GetHash = m.blockchain.GetHashHelper 77 | 78 | // Setup sealer 79 | sealerConfig := &sealer.Config{ 80 | Coinbase: crypto.PubKeyToAddress(&m.key.PublicKey), 81 | } 82 | m.Sealer = sealer.NewSealer(sealerConfig, logger, m.blockchain, m.consensus, executor) 83 | m.Sealer.SetEnabled(m.config.Seal) 84 | 85 | // Setup the libp2p server 86 | if err := m.setupLibP2P(); err != nil { 87 | return nil, err 88 | } 89 | 90 | // Setup the GRPC server 91 | if err := m.setupGRPC(); err != nil { 92 | return nil, err 93 | } 94 | 95 | // Setup jsonrpc 96 | if err := m.setupJSONRPC(); err != nil { 97 | return nil, err 98 | } 99 | 100 | // Setup the syncer protocol 101 | m.syncer = protocol.NewSyncer(logger, m.blockchain) 102 | m.syncer.Register(m.libp2pServer.GetGRPCServer()) 103 | m.syncer.Start() 104 | 105 | // Register the libp2p GRPC endpoints 106 | proto.RegisterHandshakeServer(m.libp2pServer.GetGRPCServer(), &handshakeService{s: m}) 107 | 108 | m.libp2pServer.Serve() 109 | return m, nil 110 | } 111 | ```` -------------------------------------------------------------------------------- /docs/architecture/modules/networking.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: networking 3 | title: Networking 4 | --- 5 | 6 | ## Overview 7 | 8 | A node has to communicate with other nodes on the network, in order to exchange useful information.
9 | To accomplish this task, the Polygon Edge leverages the battle-tested **libp2p** framework. 10 | 11 | The choice to go with **libp2p** is primarily focused on: 12 | * **Speed** - libp2p has a significant performance improvement over devp2p (used in GETH and other clients) 13 | * **Extensibility** - it serves as a great foundation for other features of the system 14 | * **Modularity** - libp2p is modular by nature, just like the Polygon Edge. This gives greater flexibility, especially when parts of the Polygon Edge need to be swappable 15 | 16 | ## GRPC 17 | 18 | On top of **libp2p**, the Polygon Edge uses the **GRPC** protocol.
19 | Technically, the Polygon Edge uses several GRPC protocols, which will be covered later on. 20 | 21 | The GRPC layer helps abstract all the request/reply protocols and simplifies the streaming protocols needed for the Polygon Edge to function. 22 | 23 | GRPC relies on **Protocol Buffers** to define *services* and *message structures*.
24 | The services and structures are defined in *.proto* files, which are compiled and are language-agnostic. 25 | 26 | Earlier, we mentioned that the Polygon Edge leverages several GRPC protocols.
27 | This was done to boost the overall UX for the node operator, something which often lags with clients like GETH and Parity. 28 | 29 | The node operator has a better overview of what is going on with the system by calling the GRPC service, instead of sifting through logs to find the information they're looking for. 30 | 31 | ### GRPC for Node Operators 32 | 33 | The following section might seem familiar because it was briefly covered in the [CLI Commands](/docs/get-started/cli-commands) section. 34 | 35 | The GRPC service that is intended to be used by **node operators** is defined like so: 36 | ````go title="minimal/proto/system.proto" 37 | service System { 38 | // GetInfo returns info about the client 39 | rpc GetStatus(google.protobuf.Empty) returns (ServerStatus); 40 | 41 | // PeersAdd adds a new peer 42 | rpc PeersAdd(PeersAddRequest) returns (google.protobuf.Empty); 43 | 44 | // PeersList returns the list of peers 45 | rpc PeersList(google.protobuf.Empty) returns (PeersListResponse); 46 | 47 | // PeersInfo returns the info of a peer 48 | rpc PeersStatus(PeersStatusRequest) returns (Peer); 49 | 50 | // Subscribe subscribes to blockchain events 51 | rpc Subscribe(google.protobuf.Empty) returns (stream BlockchainEvent); 52 | } 53 | ```` 54 | :::tip 55 | The CLI commands actually call the implementations of these service methods. 56 | 57 | These methods are implemented in ***minimal/system_service.go***. 58 | ::: 59 | 60 | ### GRPC for Other Nodes 61 | 62 | The Polygon Edge also implements several service methods that are used by other nodes on the network.
63 | The mentioned service is described in the **[Protocol](/docs/architecture/modules/protocol)** section. 64 | 65 | ## 📜 Resources 66 | * **[Protocol Buffers](https://developers.google.com/protocol-buffers)** 67 | * **[libp2p](https://libp2p.io/)** 68 | * **[gRPC](https://grpc.io/)** -------------------------------------------------------------------------------- /docs/architecture/modules/other-modules.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: other-modules 3 | title: Other modules 4 | --- 5 | 6 | ## Crypto 7 | 8 | The **Crypto** module contains crypto utility functions. 9 | 10 | ## Chain 11 | 12 | The **Chain** module contains chain parameters (active forks, consensus engine, etc.) 13 | 14 | * **chains** - Predefined chain configurations (mainnet, goerli, ibft) 15 | 16 | ## Helper 17 | 18 | The **Helper** module contains helper packages. 19 | 20 | * **dao** - Dao utils 21 | * **enode** - Enode encoding/decoding function 22 | * **hex** - Hex encoding/decoding functions 23 | * **ipc** - IPC connection functions 24 | * **keccak** - Keccak functions 25 | * **rlputil** - Rlp encoding/decoding helper function 26 | 27 | ## Command 28 | 29 | The **Command** module contains interfaces for CLI commands. -------------------------------------------------------------------------------- /docs/architecture/modules/protocol.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: protocol 3 | title: Protocol 4 | --- 5 | 6 | ## Overview 7 | 8 | The **Protocol** module contains the logic for the synchronization protocol. 9 | 10 | The Polygon Edge uses **libp2p** as the networking layer, and on top of that runs **gRPC**. 
11 | 12 | ## GRPC for Other Nodes 13 | 14 | ````go title="protocol/proto/v1.proto" 15 | service V1 { 16 | // Returns status information regarding the specific point in time 17 | rpc GetCurrent(google.protobuf.Empty) returns (V1Status); 18 | 19 | // Returns any type of object (Header, Body, Receipts...) 20 | rpc GetObjectsByHash(HashRequest) returns (Response); 21 | 22 | // Returns a range of headers 23 | rpc GetHeaders(GetHeadersRequest) returns (Response); 24 | 25 | // Watches what new blocks get included 26 | rpc Watch(google.protobuf.Empty) returns (stream V1Status); 27 | } 28 | ```` 29 | 30 | ### Status Object 31 | 32 | ````go title="protocol/proto/v1.proto" 33 | message V1Status { 34 | string difficulty = 1; 35 | string hash = 2; 36 | int64 number = 3; 37 | } 38 | ```` -------------------------------------------------------------------------------- /docs/architecture/modules/sealer.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: sealer 3 | title: Sealer 4 | --- 5 | 6 | ## Overview 7 | 8 | The **Sealer** is an entity that gathers the transactions, and creates a new block.
9 | Then, that block is sent to the **Consensus** module to seal it. 10 | 11 | The final sealing logic is located within the **Consensus** module. 12 | 13 | ## Run Method 14 | 15 | ````go title="sealer/sealer.go" 16 | func (s *Sealer) run(ctx context.Context) { 17 | sub := s.blockchain.SubscribeEvents() 18 | eventCh := sub.GetEventCh() 19 | 20 | for { 21 | if s.config.DevMode { 22 | // In dev-mode we wait for new transactions to seal blocks 23 | select { 24 | case <-s.wakeCh: 25 | case <-ctx.Done(): 26 | return 27 | } 28 | } 29 | 30 | // start sealing 31 | subCtx, cancel := context.WithCancel(ctx) 32 | done := s.sealAsync(subCtx) 33 | 34 | // wait for the sealing to be done 35 | select { 36 | case <-done: 37 | // the sealing process has finished 38 | case <-ctx.Done(): 39 | // the sealing routine has been canceled 40 | case <-eventCh: 41 | // there is a new head, reset sealer 42 | } 43 | 44 | // cancel the sealing process context 45 | cancel() 46 | 47 | if ctx.Err() != nil { 48 | return 49 | } 50 | } 51 | } 52 | ```` 53 | 54 | :::caution Work in progress 55 | The **Sealer** and the **Consensus** modules will be combined into a single entity in the near future. 56 | 57 | The new module will incorporate modular logic for different kinds of consensus mechanisms, which require different sealing implementations: 58 | * **PoS** (Proof of Stake) 59 | * **PoA** (Proof of Authority) 60 | 61 | Currently, the **Sealer** and the **Consensus** modules work with PoW (Proof of Work). 62 | ::: -------------------------------------------------------------------------------- /docs/architecture/modules/state.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: state 3 | title: State 4 | --- 5 | 6 | To truly understand how **State** works, you must understand some basic Ethereum concepts.
7 | 8 | We highly recommend reading the **[State in Ethereum guide](/docs/concepts/ethereum-state)**. 9 | 10 | ## Overview 11 | 12 | Now that we've familiarized ourselves with basic Ethereum concepts, the next overview should be easy. 13 | 14 | We mentioned that the **World state trie** has all the Ethereum accounts that exist.
15 | These accounts are the leaves of the Merkle trie. Each leaf has encoded **Account State** information. 16 | 17 | This enables the Polygon Edge to get a specific Merkle trie, for a specific point in time.
18 | For example, we can get the hash of the state at block 10. 19 | 20 | The Merkle trie, at any point in time, is called a ***Snapshot***. 21 | 22 | We can have ***Snapshots*** for the **state trie**, or for the **storage trie** - they are basically the same.
23 | The only difference is in what the leaves represent: 24 | 25 | * In the case of the storage trie, the leaves contain an arbitrary state, which we cannot process or know what's in there 26 | * In the case of the state trie, the leaves represent accounts 27 | 28 | ````go title="state/state.go" 29 | type State interface { 30 | // Gets a snapshot for a specific hash 31 | NewSnapshotAt(types.Hash) (Snapshot, error) 32 | 33 | // Gets the latest snapshot 34 | NewSnapshot() Snapshot 35 | 36 | // Gets the codeHash 37 | GetCode(hash types.Hash) ([]byte, bool) 38 | } 39 | ```` 40 | 41 | The **Snapshot** interface is defined as such: 42 | 43 | ````go title="state/state.go" 44 | type Snapshot interface { 45 | // Gets a specific value for a leaf 46 | Get(k []byte) ([]byte, bool) 47 | 48 | // Commits new information 49 | Commit(objs []*Object) (Snapshot, []byte) 50 | } 51 | ```` 52 | 53 | The information that can be committed is defined by the *Object struct*: 54 | 55 | ````go title="state/state.go" 56 | // Object is the serialization of the radix object 57 | type Object struct { 58 | Address types.Address 59 | CodeHash types.Hash 60 | Balance *big.Int 61 | Root types.Hash 62 | Nonce uint64 63 | Deleted bool 64 | 65 | DirtyCode bool 66 | Code []byte 67 | 68 | Storage []*StorageObject 69 | } 70 | ```` 71 | 72 | The implementation for the Merkle trie is in the *state/immutable-trie* folder.
73 | *state/immutable-trie/state.go* implements the **State** interface. 74 | 75 | *state/immutable-trie/trie.go* is the main Merkle trie object. It represents an optimized version of the Merkle trie, 76 | which reuses as much memory as possible. 77 | 78 | ## Executor 79 | 80 | *state/executor.go* includes all the information needed for the Polygon Edge to decide how a block changes the current 81 | state. The implementation of *ProcessBlock* is located here. 82 | 83 | The *apply* method does the actual state transition. The executor calls the EVM. 84 | 85 | ````go title="state/executor.go" 86 | func (t *Transition) apply(msg *types.Transaction) ([]byte, uint64, bool, error) { 87 | // check if there is enough gas in the pool 88 | if err := t.subGasPool(msg.Gas); err != nil { 89 | return nil, 0, false, err 90 | } 91 | 92 | txn := t.state 93 | s := txn.Snapshot() 94 | 95 | gas, err := t.preCheck(msg) 96 | if err != nil { 97 | return nil, 0, false, err 98 | } 99 | if gas > msg.Gas { 100 | return nil, 0, false, errorVMOutOfGas 101 | } 102 | 103 | gasPrice := new(big.Int).SetBytes(msg.GetGasPrice()) 104 | value := new(big.Int).SetBytes(msg.Value) 105 | 106 | // Set the specific transaction fields in the context 107 | t.ctx.GasPrice = types.BytesToHash(msg.GetGasPrice()) 108 | t.ctx.Origin = msg.From 109 | 110 | var subErr error 111 | var gasLeft uint64 112 | var returnValue []byte 113 | 114 | if msg.IsContractCreation() { 115 | _, gasLeft, subErr = t.Create2(msg.From, msg.Input, value, gas) 116 | } else { 117 | txn.IncrNonce(msg.From) 118 | returnValue, gasLeft, subErr = t.Call2(msg.From, *msg.To, msg.Input, value, gas) 119 | } 120 | 121 | if subErr != nil { 122 | if subErr == runtime.ErrNotEnoughFunds { 123 | txn.RevertToSnapshot(s) 124 | return nil, 0, false, subErr 125 | } 126 | } 127 | 128 | gasUsed := msg.Gas - gasLeft 129 | refund := gasUsed / 2 130 | if refund > txn.GetRefund() { 131 | refund = txn.GetRefund() 132 | } 133 | 134 | gasLeft += refund 135 | gasUsed 
-= refund 136 | 137 | // refund the sender 138 | remaining := new(big.Int).Mul(new(big.Int).SetUint64(gasLeft), gasPrice) 139 | txn.AddBalance(msg.From, remaining) 140 | 141 | // pay the coinbase 142 | coinbaseFee := new(big.Int).Mul(new(big.Int).SetUint64(gasUsed), gasPrice) 143 | txn.AddBalance(t.ctx.Coinbase, coinbaseFee) 144 | 145 | // return gas to the pool 146 | t.addGasPool(gasLeft) 147 | 148 | return returnValue, gasUsed, subErr != nil, nil 149 | } 150 | ```` 151 | 152 | ## Runtime 153 | 154 | When a state transition is executed, the main module that executes the state transition is the EVM (located in 155 | state/runtime/evm). 156 | 157 | The **dispatch table** does a match between the **opcode** and the instruction. 158 | 159 | ````go title="state/runtime/evm/dispatch_table.go" 160 | func init() { 161 | // unsigned arithmetic operations 162 | register(STOP, handler{opStop, 0, 0}) 163 | register(ADD, handler{opAdd, 2, 3}) 164 | register(SUB, handler{opSub, 2, 3}) 165 | register(MUL, handler{opMul, 2, 5}) 166 | register(DIV, handler{opDiv, 2, 5}) 167 | register(SDIV, handler{opSDiv, 2, 5}) 168 | register(MOD, handler{opMod, 2, 5}) 169 | register(SMOD, handler{opSMod, 2, 5}) 170 | register(EXP, handler{opExp, 2, 10}) 171 | 172 | ... 173 | 174 | // jumps 175 | register(JUMP, handler{opJump, 1, 8}) 176 | register(JUMPI, handler{opJumpi, 2, 10}) 177 | register(JUMPDEST, handler{opJumpDest, 0, 1}) 178 | } 179 | ```` 180 | 181 | The core logic that powers the EVM is the *Run* loop.
182 | 183 | This is the main entry point for the EVM. It does a loop and checks the current opcode, fetches the instruction, checks 184 | if it can be executed, consumes gas and executes the instruction until it either fails or stops. 185 | 186 | ````go title="state/runtime/evm/state.go" 187 | 188 | // Run executes the virtual machine 189 | func (c *state) Run() ([]byte, error) { 190 | var vmerr error 191 | 192 | codeSize := len(c.code) 193 | 194 | for !c.stop { 195 | if c.ip >= codeSize { 196 | c.halt() 197 | break 198 | } 199 | 200 | op := OpCode(c.code[c.ip]) 201 | 202 | inst := dispatchTable[op] 203 | 204 | if inst.inst == nil { 205 | c.exit(errOpCodeNotFound) 206 | break 207 | } 208 | 209 | // check if the depth of the stack is enough for the instruction 210 | if c.sp < inst.stack { 211 | c.exit(errStackUnderflow) 212 | break 213 | } 214 | 215 | // consume the gas of the instruction 216 | if !c.consumeGas(inst.gas) { 217 | c.exit(errOutOfGas) 218 | break 219 | } 220 | 221 | // execute the instruction 222 | inst.inst(c) 223 | 224 | // check if stack size exceeds the max size 225 | if c.sp > stackSize { 226 | c.exit(errStackOverflow) 227 | break 228 | } 229 | 230 | c.ip++ 231 | } 232 | 233 | if err := c.err; err != nil { 234 | vmerr = err 235 | } 236 | 237 | return c.ret, vmerr 238 | } 239 | ```` -------------------------------------------------------------------------------- /docs/architecture/modules/storage.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: storage 3 | title: Storage 4 | --- 5 | 6 | ## Overview 7 | 8 | The Polygon Edge currently utilizes **LevelDB** for data storage, as well as an **in-memory** data store. 9 | 10 | Throughout the Polygon Edge, when modules need to interact with the underlying data store, 11 | they don't need to know which DB engine or service they're speaking to. 
12 | 13 | The DB layer is abstracted away between a module called **Storage**, which exports interfaces that modules query. 14 | 15 | Each DB layer, for now only **LevelDB**, implements these methods separately, making sure they fit in with their implementation. 16 | 17 | ````go title="blockchain/storage/storage.go" 18 | // Storage is a generic blockchain storage 19 | type Storage interface { 20 | ReadCanonicalHash(n uint64) (types.Hash, bool) 21 | WriteCanonicalHash(n uint64, hash types.Hash) error 22 | 23 | ReadHeadHash() (types.Hash, bool) 24 | ReadHeadNumber() (uint64, bool) 25 | WriteHeadHash(h types.Hash) error 26 | WriteHeadNumber(uint64) error 27 | 28 | WriteForks(forks []types.Hash) error 29 | ReadForks() ([]types.Hash, error) 30 | 31 | WriteDiff(hash types.Hash, diff *big.Int) error 32 | ReadDiff(hash types.Hash) (*big.Int, bool) 33 | 34 | WriteHeader(h *types.Header) error 35 | ReadHeader(hash types.Hash) (*types.Header, error) 36 | 37 | WriteCanonicalHeader(h *types.Header, diff *big.Int) error 38 | 39 | WriteBody(hash types.Hash, body *types.Body) error 40 | ReadBody(hash types.Hash) (*types.Body, error) 41 | 42 | WriteSnapshot(hash types.Hash, blob []byte) error 43 | ReadSnapshot(hash types.Hash) ([]byte, bool) 44 | 45 | WriteReceipts(hash types.Hash, receipts []*types.Receipt) error 46 | ReadReceipts(hash types.Hash) ([]*types.Receipt, error) 47 | 48 | WriteTxLookup(hash types.Hash, blockHash types.Hash) error 49 | ReadTxLookup(hash types.Hash) (types.Hash, bool) 50 | 51 | Close() error 52 | } 53 | ```` 54 | 55 | ## LevelDB 56 | 57 | ### Prefixes 58 | 59 | In order to make querying the LevelDB storage deterministic, and to avoid key storage clashing, the Polygon Edge leverages 60 | prefixes and sub-prefixes when storing data 61 | 62 | ````go title="blockchain/storage/keyvalue.go" 63 | // Prefixes for the key-value store 64 | var ( 65 | // DIFFICULTY is the difficulty prefix 66 | DIFFICULTY = []byte("d") 67 | 68 | // HEADER is the header prefix 69 | 
HEADER = []byte("h") 70 | 71 | // HEAD is the chain head prefix 72 | HEAD = []byte("o") 73 | 74 | // FORK is the entry to store forks 75 | FORK = []byte("f") 76 | 77 | // CANONICAL is the prefix for the canonical chain numbers 78 | CANONICAL = []byte("c") 79 | 80 | // BODY is the prefix for bodies 81 | BODY = []byte("b") 82 | 83 | // RECEIPTS is the prefix for receipts 84 | RECEIPTS = []byte("r") 85 | 86 | // SNAPSHOTS is the prefix for snapshots 87 | SNAPSHOTS = []byte("s") 88 | 89 | // TX_LOOKUP_PREFIX is the prefix for transaction lookups 90 | TX_LOOKUP_PREFIX = []byte("l") 91 | ) 92 | 93 | // Sub-prefixes 94 | var ( 95 | HASH = []byte("hash") 96 | NUMBER = []byte("number") 97 | EMPTY = []byte("empty") 98 | ) 99 | ```` 100 | 101 | ## Future Plans 102 | 103 | The plans for the near future include adding some of the most popular DB solutions, such as: 104 | * PostgreSQL 105 | * MySQL 106 | 107 | 108 | ## 📜 Resources 109 | * **[LevelDB](https://github.com/google/leveldb)** -------------------------------------------------------------------------------- /docs/architecture/modules/txpool.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: txpool 3 | title: TxPool 4 | --- 5 | 6 | ## Overview 7 | 8 | The TxPool module represents the transaction pool implementation, where transactions are added from different parts of 9 | the system. The module also exposes several useful features for node operators, which are covered below. 
10 | 11 | ## Operator Commands 12 | 13 | ````go title="txpool/proto/operator.proto" 14 | service TxnPoolOperator { 15 | // Status returns the current status of the pool 16 | rpc Status(google.protobuf.Empty) returns (TxnPoolStatusResp); 17 | 18 | // AddTxn adds a local transaction to the pool 19 | rpc AddTxn(AddTxnReq) returns (google.protobuf.Empty); 20 | 21 | // Subscribe subscribes for new events in the txpool 22 | rpc Subscribe(google.protobuf.Empty) returns (stream TxPoolEvent); 23 | } 24 | 25 | ```` 26 | 27 | Node operators can query these GRPC endpoints, as described in the **[CLI Commands](/docs/get-started/cli-commands#transaction-pool-commands)** section. 28 | 29 | ## Processing Transactions 30 | 31 | ````go title="txpool/txpool.go" 32 | // AddTx adds a new transaction to the pool 33 | func (t *TxPool) AddTx(tx *types.Transaction) error { 34 | if err := t.addImpl("addTxn", tx); err != nil { 35 | return err 36 | } 37 | 38 | // broadcast the transaction only if network is enabled 39 | // and we are not in dev mode 40 | if t.topic != nil && !t.dev { 41 | txn := &proto.Txn{ 42 | Raw: &any.Any{ 43 | Value: tx.MarshalRLP(), 44 | }, 45 | } 46 | if err := t.topic.Publish(txn); err != nil { 47 | t.logger.Error("failed to topic txn", "err", err) 48 | } 49 | } 50 | 51 | if t.NotifyCh != nil { 52 | select { 53 | case t.NotifyCh <- struct{}{}: 54 | default: 55 | } 56 | } 57 | return nil 58 | } 59 | 60 | func (t *TxPool) addImpl(ctx string, txns ...*types.Transaction) error { 61 | if len(txns) == 0 { 62 | return nil 63 | } 64 | 65 | from := txns[0].From 66 | for _, txn := range txns { 67 | // Since this is a single point of inclusion for new transactions both 68 | // to the promoted queue and pending queue we use this point to calculate the hash 69 | txn.ComputeHash() 70 | 71 | err := t.validateTx(txn) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | if txn.From == types.ZeroAddress { 77 | txn.From, err = t.signer.Sender(txn) 78 | if err != nil { 79 | return 
fmt.Errorf("invalid sender") 80 | } 81 | from = txn.From 82 | } else { 83 | // only if we are in dev mode we can accept 84 | // a transaction without validation 85 | if !t.dev { 86 | return fmt.Errorf("cannot accept non-encrypted txn") 87 | } 88 | } 89 | 90 | t.logger.Debug("add txn", "ctx", ctx, "hash", txn.Hash, "from", from) 91 | } 92 | 93 | txnsQueue, ok := t.queue[from] 94 | if !ok { 95 | stateRoot := t.store.Header().StateRoot 96 | 97 | // initialize the txn queue for the account 98 | txnsQueue = newTxQueue() 99 | txnsQueue.nextNonce = t.store.GetNonce(stateRoot, from) 100 | t.queue[from] = txnsQueue 101 | } 102 | for _, txn := range txns { 103 | txnsQueue.Add(txn) 104 | } 105 | 106 | for _, promoted := range txnsQueue.Promote() { 107 | t.sorted.Push(promoted) 108 | } 109 | return nil 110 | } 111 | ```` 112 | The ***addImpl*** method is the bread and butter of the **TxPool** module. 113 | It is the central place where transactions are added in the system, being called from the GRPC service, JSON RPC endpoints, 114 | and whenever the client receives a transaction through the **gossip** protocol. 115 | 116 | It takes in as an argument **ctx**, which just denotes the context from which the transactions are being added (GRPC, JSON RPC...).
117 | The other parameter is the list of transactions to be added to the pool. 118 | 119 | The key thing to note here is the check for the **From** field within the transaction: 120 | * If the **From** field is **empty**, it is regarded as an unencrypted/unsigned transaction. These kinds of transactions are only 121 | accepted in development mode 122 | * If the **From** field is **not empty**, that means that it's a signed transaction, so signature verification takes place 123 | 124 | After all these validations, the transactions are considered to be valid. 125 | 126 | ## Data structures 127 | 128 | ````go title="txpool/txpool.go" 129 | // TxPool is a pool of transactions 130 | type TxPool struct { 131 | logger hclog.Logger 132 | signer signer 133 | 134 | store store 135 | idlePeriod time.Duration 136 | 137 | queue map[types.Address]*txQueue 138 | sorted *txPriceHeap 139 | 140 | // network stack 141 | network *network.Server 142 | topic *network.Topic 143 | 144 | sealing bool 145 | dev bool 146 | NotifyCh chan struct{} 147 | 148 | proto.UnimplementedTxnPoolOperatorServer 149 | } 150 | ```` 151 | 152 | The fields in the TxPool object that can cause confusion are the **queue** and **sorted** lists. 153 | * **queue** - Heap implementation of a sorted list of account transactions (by nonce) 154 | * **sorted** - Sorted list for all the current promoted transactions (all executable transactions). Sorted by gas price 155 | 156 | ## Gas limit error management 157 | 158 | Whenever you submit a transaction, there are three ways it can be processed by the TxPool. 159 | 160 | 1. All pending transactions can fit in a block 161 | 2. One or more pending transactions can not fit in a block 162 | 3. One or more pending transactions will never fit in a block 163 | 164 | Here, the word **_fit_** means that the transaction has a gas limit that is lower than the remaining gas in the TxPool. 165 | 166 | The first scenario does not produce any error. 
167 | 168 | ### Second scenario 169 | 170 | - The TxPool remaining gas is set to the gas limit of the last block, let's say **5000** 171 | - A first transaction is processed and consumes **3000** gas of the TxPool 172 | - The remaining gas of the TxPool is now **2000** 173 | - A second transaction, which is the same as the first one - they both consume 3000 units of gas, is submitted 174 | - Since the remaining gas of the TxPool is **lower** than the transaction gas, it cannot be processed in the current 175 | block 176 | - It is put back into a pending transaction queue so that it can be processed in the next block 177 | - The first block is written, let's call it **block #1** 178 | - The TxPool remaining gas is set to the parent block - **block #1**'s gas limit 179 | - The transaction which was put back into the TxPool pending transaction queue is now processed and written in the block 180 | - The TxPool remaining gas is now **2000** 181 | - The second block is written 182 | - ... 183 | 184 | ![TxPool Error scenario #1](/img/txpool-error-1.png) 185 | 186 | ### Third scenario 187 | - The TxPool remaining gas is set to the gas limit of the last block, let's say **5000** 188 | - A first transaction is processed and consumes **3000** gas of the TxPool 189 | - The remaining gas of the TxPool is now **2000** 190 | - A second transaction, with a gas field set to **6000** is submitted 191 | - Since the block gas limit is **lower** than the transaction gas, this transaction is discarded 192 | - It will never be able to fit in a block 193 | - The first block is written 194 | - ... 
195 | 196 | 197 | ![TxPool Error scenario #2](/img/txpool-error-2.png) 198 | 199 | > This happens whenever you get the following error: 200 | > ```shell 201 | > 2021-11-04T15:41:07.665+0100 [ERROR] polygon.consensus.dev: failed to write transaction: transaction's gas limit exceeds block gas limit 202 | > ``` 203 | 204 | ## Block Gas Target 205 | 206 | There are situations when nodes want to keep the block gas limit below or at a certain target on a running chain. 207 | 208 | The node operator can set the target gas limit on a specific node, which will try to apply this limit to newly created blocks. 209 | If the majority of the other nodes also have a similar (or same) target gas limit set, then the block gas limit will always hover 210 | around that block gas target, slowly progressing towards it (at max `1/1024 * parent block gas limit`) as new blocks are created. 211 | 212 | ### Example scenario 213 | 214 | * The node operator sets the block gas limit for a single node to be `5000` 215 | * Other nodes are configured to be `5000` as well, apart from a single node which is configured to be `7000` 216 | * When the nodes who have their gas target set to `5000` become proposers, they will check to see if the gas limit is already at the target 217 | * If the gas limit is not at the target (it is greater / lower), the proposer node will move the block gas limit by at most (1/1024 * parent gas limit) in the direction of the target 218 | 1. Ex: `parentGasLimit = 4500` and `blockGasTarget = 5000`, the proposer will calculate the gas limit for the new block as `4504.39453125` (`4500/1024 + 4500`) 219 | 2. 
Ex: `parentGasLimit = 5500` and `blockGasTarget = 5000`, the proposer will calculate the gas limit for the new block as `5494.62890625` (`5500 - 5500/1024`) 220 | * This ensures that the block gas limit in the chain will be kept at the target, because the single proposer who has the target configured to `7000` cannot advance the limit much, and the majority 221 | of the nodes who have it set at `5000` will always attempt to keep it there -------------------------------------------------------------------------------- /docs/architecture/modules/types.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: types 3 | title: Types 4 | --- 5 | 6 | ## Overview 7 | 8 | The **Types** module implements core object types, such as: 9 | 10 | * **Address** 11 | * **Hash** 12 | * **Header** 13 | * lots of helper functions 14 | 15 | ## RLP Encoding / Decoding 16 | 17 | Unlike clients such as GETH, the Polygon Edge doesn't use reflection for the encoding.
18 | The preference was to not use reflection because it introduces new problems, such as performance 19 | degradation, and harder scaling. 20 | 21 | The **Types** module provides an easy-to-use interface for RLP marshaling and unmarshalling, using the FastRLP package. 22 | 23 | Marshaling is done through the *MarshalRLPWith* and *MarshalRLPTo* methods. The analogous methods exist for 24 | unmarshalling. 25 | 26 | By manually defining these methods, the Polygon Edge doesn't need to use reflection. In *rlp_marshal.go*, you can find 27 | methods for marshaling: 28 | 29 | * **Bodies** 30 | * **Blocks** 31 | * **Headers** 32 | * **Receipts** 33 | * **Logs** 34 | * **Transactions** -------------------------------------------------------------------------------- /docs/architecture/overview.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: overview 3 | title: Architecture Overview 4 | sidebar_label: Overview 5 | --- 6 | 7 | We started with the idea of making software that is *modular*. 8 | 9 | This is something that is present in almost all parts of the Polygon Edge. Below, you will find a brief overview of the 10 | built architecture and its layering. 11 | 12 | ## Polygon Edge Layering 13 | 14 | ![Polygon Edge Architecture](/img/Architecture.jpg) 15 | 16 | ## Libp2p 17 | 18 | It all starts at the base networking layer, which utilizes **libp2p**. We decided to go with this technology because it 19 | fits into the designing philosophies of Polygon Edge. Libp2p is: 20 | 21 | - Modular 22 | - Extensible 23 | - Fast 24 | 25 | Most importantly, it provides a great foundation for more advanced features, which we'll cover later on. 26 | 27 | 28 | ## Synchronization & Consensus 29 | The separation of the synchronization and consensus protocols allows for modularity and implementation of **custom** sync and consensus mechanisms - depending on how the client is being run. 
30 | 31 | Polygon Edge is designed to offer off-the-shelf pluggable consensus algorithms. 32 | 33 | The current list of supported consensus algorithms: 34 | 35 | * IBFT PoA 36 | * IBFT PoS 37 | 38 | ## Blockchain 39 | The Blockchain layer is the central layer that coordinates everything in the Polygon Edge system. It is covered in depth in the corresponding *Modules* section. 40 | 41 | ## State 42 | The State inner layer contains state transition logic. It deals with how the state changes when a new block is included. It is covered in depth in the corresponding *Modules* section. 43 | 44 | ## JSON RPC 45 | The JSON RPC layer is an API layer that dApp developers use to interact with the blockchain. It is covered in depth in the corresponding *Modules* section. 46 | 47 | ## TxPool 48 | The TxPool layer represents the transaction pool, and it is closely linked with other modules in the system, as transactions can be added from multiple entry points. 49 | 50 | ## GRPC 51 | The GRPC layer is vital for operator interactions. Through it, node operators can easily interact with the client, providing an enjoyable UX. -------------------------------------------------------------------------------- /docs/community/propose-new-feature.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: propose-new-feature 3 | title: Propose a new feature 4 | --- 5 | 6 | ## Overview 7 | 8 | If you want to include a fix, or just contribute to the code, it is highly encouraged for you to reach out to the team first.
9 | The Polygon Edge uses a relatively basic feature proposition template, that is concise and to the point. 10 | 11 | ## PR Template 12 | 13 | ### Description 14 | 15 | Please provide a detailed description of what was done in this PR 16 | 17 | ### Changes include 18 | 19 | - [ ] Bugfix (non-breaking change that solves an issue) 20 | - [ ] Hotfix (change that solves an urgent issue, and requires immediate attention) 21 | - [ ] New feature (non-breaking change that adds functionality) 22 | - [ ] Breaking change (change that is not backward-compatible and/or changes current functionality) 23 | 24 | ### Breaking changes 25 | 26 | Please complete this section if any breaking changes have been made, otherwise delete it 27 | 28 | ### Checklist 29 | 30 | - [ ] I have assigned this PR to myself 31 | - [ ] I have added at least 1 reviewer 32 | - [ ] I have added the relevant labels 33 | - [ ] I have updated the official documentation 34 | - [ ] I have added sufficient documentation in code 35 | 36 | ### Testing 37 | 38 | - [ ] I have tested this code with the official test suite 39 | - [ ] I have tested this code manually 40 | 41 | ## Manual tests 42 | 43 | Please complete this section if you ran manual tests for this functionality, otherwise delete it 44 | 45 | ### Documentation update 46 | 47 | Please link the documentation update PR in this section if it's present, otherwise delete it 48 | 49 | ### Additional comments 50 | 51 | Please post additional comments in this section if you have them, otherwise delete it -------------------------------------------------------------------------------- /docs/community/report-bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: report-bug 3 | title: Report an issue 4 | --- 5 | 6 | ## Overview 7 | 8 | Sometimes things break.
9 | It's always better to let the team know about any issues you might be encountering.
10 | On the Polygon Edge GitHub page, you can file a new issue, and start discussing it with the team. 11 | 12 | ## Issue Template 13 | 14 | ## [Subject of the issue] 15 | 16 | ### Description 17 | 18 | Describe your issue in as much detail as possible here 19 | 20 | ### Your environment 21 | 22 | * OS and version 23 | * version of the Polygon Edge 24 | * branch that causes this issue 25 | 26 | ### Steps to reproduce 27 | 28 | * Tell us how to reproduce this issue
29 | * Where the issue is, if you know
30 | * Which commands triggered the issue, if any 31 | 32 | ### Expected behaviour 33 | 34 | Tell us what should happen 35 | 36 | ### Actual behaviour 37 | 38 | Tell us what happens instead 39 | 40 | ### Logs 41 | 42 | Please paste any logs here that demonstrate the issue, if they exist 43 | 44 | ### Proposed solution 45 | 46 | If you have an idea of how to fix this issue, please write it down here, so we can begin discussing it -------------------------------------------------------------------------------- /docs/configuration/enable-metrics.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | id: enable-metrics 3 | title: Enable Prometheus metrics 4 | --- 5 | 6 | ## Overview 7 | 8 | Polygon Edge can report and serve the Prometheus metrics, which in their turn can be consumed using Prometheus collector(s). 9 | 10 | Prometheus metrics are disabled by default. It can be enabled by specifying the listener address using `--prometheus` flag or `Telemetry.prometheus` field in the config file. 11 | Metrics will be served under `/metrics` on the specified address. 
12 | 13 | ## Available metrics 14 | 15 | The following metrics are available: 16 | 17 | | **Name** | **Type** | **Description** | 18 | |-----------------------------------------------|---------------|-------------------------------------------------| 19 | | txpool_pending_transactions | Gauge | Number of pending transactions in TxPool | 20 | | consensus_validators | Gauge | Number of Validators | 21 | | consensus_rounds | Gauge | Number of Rounds | 22 | | consensus_num_txs | Gauge | Number of Transactions in the latest block | 23 | | consensus_block_interval | Gauge | Time between this and last block in seconds | 24 | | network_peers | Gauge | Number of Connected peers | 25 | | network_outbound_connections_count | Gauge | Number of outbound connections | 26 | | network_inbound_connections_count | Gauge | Number of inbound connections | 27 | | network_pending_outbound_connections_count | Gauge | Number of pending outbound connections | 28 | | network_pending_inbound_connections_count | Gauge | Number of pending inbound connections | -------------------------------------------------------------------------------- /docs/configuration/manage-private-keys.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: manage-private-keys 3 | title: Manage private keys 4 | --- 5 | 6 | ## Overview 7 | 8 | The Polygon Edge has two types of private keys that it directly manages: 9 | 10 | * **Private key used for the consensus mechanism** 11 | * **Private key used for networking by libp2p** 12 | 13 | Currently, the Polygon Edge doesn't offer support for direct account management. 14 | 15 | Based on the directory structure outlined in the [Backup & Restore guide](/docs/working-with-node/backup-restore), 16 | the Polygon Edge stores these mentioned key files in two distinct directories - **consensus** and **keystore**. 
17 | 18 | ## Key format 19 | 20 | The private keys are stored in simple **Base64 format**, so they can be human-readable and portable. 21 | 22 | ```bash 23 | # Example private key 24 | 0802122068a1bdb1c8af5333e58fe586bc0e9fc7aff882da82affb678aef5d9a2b9100c0 25 | ``` 26 | 27 | :::info Key Type 28 | All private key files generated and used inside the Polygon Edge are relying on ECDSA with the curve [secp256k1](https://en.bitcoin.it/wiki/Secp256k1). 29 | 30 | As the curve is non-standard, it cannot be encoded and stored in any standardized PEM format. 31 | Importing keys that don't conform to this key type is not supported. 32 | ::: 33 | ## Consensus Private Key 34 | 35 | The private key file mentioned as the *consensus private key* is also referred to as the **validator private key**. 36 | This private key is used when the node is acting as a validator in the network and needs to sign new data. 37 | 38 | The private key file is located in `consensus/validator.key`, and adheres to the [key format](/docs/configuration/manage-private-keys#key-format) mentioned. 39 | 40 | ## Networking Private Key 41 | 42 | The private key file mentioned for networking is used by libp2p to generate the corresponding PeerID, and allow the node to participate in the network. 43 | 44 | It is located in `keystore/libp2p.key`, and adheres to the [key format](/docs/configuration/manage-private-keys#key-format) mentioned. 45 | 46 | ## Import / Export 47 | 48 | As the key files are stored in simple Base64 on disk, they can be easily backed up or imported. 
49 | 50 | :::caution Changing the key files 51 | Any kind of change made to the key files on an already set up / running network can lead to serious network/consensus disruption, 52 | since the consensus and peer discovery mechanisms store the data derived from these keys in node-specific storage, and rely on this data to 53 | initiate connections and perform consensus logic 54 | ::: -------------------------------------------------------------------------------- /docs/configuration/sample-config.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: sample-config 3 | title: Server Config File 4 | --- 5 | # Server configuration file 6 | Starting the server with various configuration options can be done using a configuration file instead of using just flags. 7 | The command used to start the server with a config file: `polygon-edge server --config ` 8 | 9 | ## Export config file with default configuration 10 | The configuration with default settings for the Polygon Edge server can be exported into a config file in either `yaml` or `json` file format. 11 | This file can be used as a template for running the server using a configuration file. 12 | 13 | ### YAML 14 | To generate the config file in `yaml` format: 15 | ```bash 16 | polygon-edge server export --type yaml 17 | ``` 18 | or just 19 | ```bash 20 | polygon-edge server export 21 | ``` 22 | the config file named `default-config.yaml` will be created in the same directory that this command has been run from. 
23 | 24 | File example: 25 | ```yaml 26 | chain_config: ./genesis.json 27 | secrets_config: "" 28 | data_dir: "" 29 | block_gas_target: "0x0" 30 | grpc_addr: "" 31 | jsonrpc_addr: "" 32 | telemetry: 33 | prometheus_addr: "" 34 | network: 35 | no_discover: false 36 | libp2p_addr: 127.0.0.1:1478 37 | nat_addr: "" 38 | dns_addr: "" 39 | max_peers: -1 40 | max_outbound_peers: -1 41 | max_inbound_peers: -1 42 | seal: true 43 | tx_pool: 44 | price_limit: 0 45 | max_slots: 4096 46 | log_level: INFO 47 | restore_file: "" 48 | block_time_s: 2 49 | headers: 50 | access_control_allow_origins: 51 | - '*' 52 | log_to: "" 53 | ``` 54 | 55 | ### JSON 56 | To generate the config file in `json` format: 57 | ```bash 58 | polygon-edge server export --type json 59 | ``` 60 | the config file named `default-config.json` will be created in the same directory that this command has been run from. 61 | 62 | File example: 63 | 64 | ```json 65 | { 66 | "chain_config": "./genesis.json", 67 | "secrets_config": "", 68 | "data_dir": "", 69 | "block_gas_target": "0x0", 70 | "grpc_addr": "", 71 | "jsonrpc_addr": "", 72 | "telemetry": { 73 | "prometheus_addr": "" 74 | }, 75 | "network": { 76 | "no_discover": false, 77 | "libp2p_addr": "127.0.0.1:1478", 78 | "nat_addr": "", 79 | "dns_addr": "", 80 | "max_peers": -1, 81 | "max_outbound_peers": -1, 82 | "max_inbound_peers": -1 83 | }, 84 | "seal": true, 85 | "tx_pool": { 86 | "price_limit": 0, 87 | "max_slots": 4096 88 | }, 89 | "log_level": "INFO", 90 | "restore_file": "", 91 | "block_time_s": 2, 92 | "headers": { 93 | "access_control_allow_origins": [ 94 | "*" 95 | ] 96 | }, 97 | "log_to": "" 98 | } 99 | ``` 100 | 101 | Checkout [CLI Commands](/docs/get-started/cli-commands) section to get information on how to use these parameters. 102 | 103 | ### Typescript schema 104 | 105 | The following is the sample format for the configuration file. 
It's written in TypeScript to express the properties types (`string`, `number`, `boolean`), from it you could derive your configuration. It's worth mentioning that the `PartialDeep` type from `type-fest` is used to express that all properties are optional. 106 | 107 | ```typescript 108 | import { PartialDeep } from 'type-fest'; 109 | 110 | type ServerConfig = PartialDeep<{ 111 | chain_config: string; // 112 | secrets_config: string; // 113 | data_dir: string; // 114 | block_gas_target: string; // 115 | grpc_addr: string; // 116 | jsonrpc_addr: string; // 117 | telemetry: { 118 | prometheus_addr: string; // 119 | }; 120 | network: { 121 | no_discover: boolean; // , 122 | libp2p_addr: string; // , 123 | nat_addr: string; // , 124 | dns_addr: string; // , 125 | max_peers: number; // , 126 | max_inbound_peers: number; // , 127 | max_outbound_peers: number; // 128 | }; 129 | seal: boolean; // 130 | txpool: { 131 | price_limit: number; // 132 | max_slots: number; // 133 | }; 134 | log_level: 'DEBUG' | 'INFO' | 'WARN' | 'ERROR' | 'DPANIC' | 'PANIC' | 'FATAL'; // 135 | restore_file: string; // 136 | block_time_s: number; // 137 | headers: Record; 138 | log_to: string; // 139 | }> 140 | ``` 141 | 142 | -------------------------------------------------------------------------------- /docs/configuration/secret-managers/set-up-aws-ssm.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: set-up-aws-ssm 3 | title: Set up AWS SSM (Systems Manager) 4 | --- 5 | 6 | ## Overview 7 | 8 | Currently, the Polygon Edge is concerned with keeping 2 major runtime secrets: 9 | * The **validator private key** used by the node, if the node is a validator 10 | * The **networking private key** used by libp2p, for participating and communicating with other peers 11 | 12 | For additional information, please read through the [Managing Private Keys Guide](/docs/configuration/manage-private-keys) 13 | 14 | The modules of the Polygon Edge **should not 
need to know how to keep secrets**. Ultimately, a module should not care if 15 | a secret is stored on a far-away server or locally on the node's disk. 16 | 17 | Everything a module needs to know about secret-keeping is **knowing to use the secret**, **knowing which secrets to get 18 | or save**. The finer implementation details of these operations are delegated away to the `SecretsManager`, which of course is an abstraction. 19 | 20 | The node operator that's starting the Polygon Edge can now specify which secrets manager they want to use, and as soon 21 | as the correct secrets manager is instantiated, the modules deal with the secrets through the mentioned interface - 22 | without caring if the secrets are stored on a disk or on a server. 23 | 24 | This article details the necessary steps to get the Polygon Edge up and running with 25 | [AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html). 26 | 27 | :::info previous guides 28 | It is **highly recommended** that before going through this article, articles on [**Local Setup**](/docs/get-started/set-up-ibft-locally) 29 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) are read. 30 | ::: 31 | 32 | 33 | ## Prerequisites 34 | ### IAM Policy 35 | User needs to create an IAM Policy that allows read/write operations for AWS Systems Manager Parameter Store. 36 | After successfully creating IAM Policy, the user needs to attach it to the EC2 instance that is running the Polygon Edge server. 
37 | The IAM Policy should look something like this: 38 | ```json 39 | { 40 | "Version": "2012-10-17", 41 | "Statement" : [ 42 | { 43 | "Effect" : "Allow", 44 | "Action" : [ 45 | "ssm:PutParameter", 46 | "ssm:DeleteParameter", 47 | "ssm:GetParameter" 48 | ], 49 | "Resource" : [ 50 | "arn:aws:ssm:::parameter*" 51 | ] 52 | } 53 | ] 54 | } 55 | ``` 56 | More information on AWS SSM IAM Roles you can find in the [AWS docs](https://docs.aws.amazon.com/systems-manager/latest/userguide/setup-instance-profile.html). 57 | 58 | Required information before continuing: 59 | * **Region** (the region in which Systems Manager and nodes reside) 60 | * **Parameter Path** (arbitrary path that the secret will be placed in, for example `/polygon-edge/nodes`) 61 | 62 | ## Step 1 - Generate the secrets manager configuration 63 | 64 | In order for the Polygon Edge to be able to seamlessly communicate with the AWS SSM, it needs to parse an already 65 | generated config file, which contains all the necessary information for secret storage on AWS SSM. 66 | 67 | To generate the configuration, run the following command: 68 | 69 | ```bash 70 | polygon-edge secrets generate --type aws-ssm --dir --name --extra region=,ssm-parameter-path= 71 | ``` 72 | 73 | Parameters present: 74 | * `PATH` is the path to which the configuration file should be exported to. Default `./secretsManagerConfig.json` 75 | * `NODE_NAME` is the name of the current node for which the AWS SSM configuration is being set up as. It can be an arbitrary value. Default `polygon-edge-node` 76 | * `REGION` is the region in which the AWS SSM resides. This has to be the same region as the node utilizing AWS SSM. 77 | * `SSM_PARAM_PATH` is the name of the path that the secret will be stored in. For example if `--name node1` and `ssm-parameter-path=/polygon-edge/nodes` 78 | are specified, the secret will be stored as `/polygon-edge/nodes/node1/` 79 | 80 | :::caution Node names 81 | Be careful when specifying node names. 
82 | 83 | The Polygon Edge uses the specified node name to keep track of the secrets it generates and uses on the AWS SSM. 84 | Specifying an existing node name can have consequences of failing to write secret to AWS SSM. 85 | 86 | Secrets are stored on the following base path: `SSM_PARAM_PATH/NODE_NAME` 87 | ::: 88 | 89 | ## Step 2 - Initialize secret keys using the configuration 90 | 91 | Now that the configuration file is present, we can initialize the required secret keys with the configuration 92 | file set up in step 1, using the `--config`: 93 | 94 | ```bash 95 | polygon-edge secrets init --config 96 | ``` 97 | 98 | The `PATH` param is the location of the previously generated secrets manager param from step 1. 99 | 100 | :::info IAM Policy 101 | This step will fail if IAM Policy that allows read/write operations is not configured correctly and/or not attached to the EC2 instance running this command. 102 | ::: 103 | 104 | ## Step 3 - Generate the genesis file 105 | 106 | The genesis file should be generated in a similar manner to the [**Local Setup**](/docs/get-started/set-up-ibft-locally) 107 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) guides, with minor changes. 108 | 109 | Since AWS SSM is being used instead of the local file system, validator addresses should be added through the `--ibft-validator` flag: 110 | ```bash 111 | polygon-edge genesis --ibft-validator ... 112 | ``` 113 | 114 | ## Step 4 - Start the Polygon Edge client 115 | 116 | Now that the keys are set up, and the genesis file is generated, the final step to this process would be starting the 117 | Polygon Edge with the `server` command. 118 | 119 | The `server` command is used in the same manner as in the previously mentioned guides, with a minor addition - the `--secrets-config` flag: 120 | ```bash 121 | polygon-edge server --secrets-config ... 122 | ``` 123 | 124 | The `PATH` param is the location of the previously generated secrets manager param from step 1. 
-------------------------------------------------------------------------------- /docs/configuration/secret-managers/set-up-gcp-secrets-manager.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: set-up-gcp-secrets-manager 3 | title: Set up GCP Secrets Manager 4 | --- 5 | 6 | ## Overview 7 | 8 | Currently, the Polygon Edge is concerned with keeping 2 major runtime secrets: 9 | * The **validator private key** used by the node, if the node is a validator 10 | * The **networking private key** used by libp2p, for participating and communicating with other peers 11 | 12 | For additional information, please read through the [Managing Private Keys Guide](/docs/configuration/manage-private-keys) 13 | 14 | The modules of the Polygon Edge **should not need to know how to keep secrets**. Ultimately, a module should not care if 15 | a secret is stored on a far-away server or locally on the node's disk. 16 | 17 | Everything a module needs to know about secret-keeping is **knowing to use the secret**, **knowing which secrets to get 18 | or save**. The finer implementation details of these operations are delegated away to the `SecretsManager`, which of course is an abstraction. 19 | 20 | The node operator that's starting the Polygon Edge can now specify which secrets manager they want to use, and as soon 21 | as the correct secrets manager is instantiated, the modules deal with the secrets through the mentioned interface - 22 | without caring if the secrets are stored on a disk or on a server. 23 | 24 | This article details the necessary steps to get the Polygon Edge up and running with [GCP Secret Manager](https://cloud.google.com/secret-manager). 25 | 26 | :::info previous guides 27 | It is **highly recommended** that before going through this article, articles on [**Local Setup**](/docs/get-started/set-up-ibft-locally) 28 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) are read. 
29 | ::: 30 | 31 | 32 | ## Prerequisites 33 | ### GCP Billing Account 34 | In order to utilize GCP Secrets Manager, the user has to have a [Billing Account](https://console.cloud.google.com/) enabled on the GCP portal. 35 | New Google accounts on the GCP platform are provided with some funds to get started, as a kind of free trial. 36 | More info: [GCP docs](https://cloud.google.com/free) 37 | 38 | ### Secrets Manager API 39 | The user will need to enable the GCP Secrets Manager API before they can use it. 40 | This can be done via the [Secrets Manager API portal](https://console.cloud.google.com/apis/library/secretmanager.googleapis.com). 41 | More info: [Configuring Secret Manager](https://cloud.google.com/secret-manager/docs/configuring-secret-manager) 42 | 43 | ### GCP Credentials 44 | Finally, the user needs to generate new credentials that will be used for authentication. 45 | This can be done by following the instructions posted [here](https://cloud.google.com/secret-manager/docs/reference/libraries). 46 | The generated json file containing credentials should be transferred to each node that needs to utilize GCP Secrets Manager. 47 | 48 | Required information before continuing: 49 | * **Project ID** (the project id defined on the GCP platform) 50 | * **Credentials File Location** (the path to the json file containing the credentials) 51 | 52 | ## Step 1 - Generate the secrets manager configuration 53 | 54 | In order for the Polygon Edge to be able to seamlessly communicate with the GCP SM, it needs to parse an already 55 | generated config file, which contains all the necessary information for secret storage on GCP SM. 56 | 57 | To generate the configuration, run the following command: 58 | 59 | ```bash 60 | polygon-edge secrets generate --type gcp-ssm --dir <PATH> --name <NODE_NAME> --extra project-id=<PROJECT_ID>,gcp-ssm-cred=<GCP_CREDS_FILE> 61 | ``` 62 | 63 | Parameters present: 64 | * `PATH` is the path to which the configuration file should be exported to. 
Default `./secretsManagerConfig.json` 65 | * `NODE_NAME` is the name of the current node for which the GCP SM configuration is being set up as. It can be an arbitrary value. Default `polygon-edge-node` 66 | * `PROJECT_ID` is the ID of the project the user has defined in GCP console during account setup and Secrets Manager API activation. 67 | * `GCP_CREDS_FILE` is the path to the json file containing credentials which will allow read/write access to the Secrets Manager. 68 | 69 | :::caution Node names 70 | Be careful when specifying node names. 71 | 72 | The Polygon Edge uses the specified node name to keep track of the secrets it generates and uses on the GCP SM. 73 | Specifying an existing node name can have consequences of failing to write secret to GCP SM. 74 | 75 | Secrets are stored on the following base path: `projects/PROJECT_ID/NODE_NAME` 76 | ::: 77 | 78 | ## Step 2 - Initialize secret keys using the configuration 79 | 80 | Now that the configuration file is present, we can initialize the required secret keys with the configuration 81 | file set up in step 1, using the `--config`: 82 | 83 | ```bash 84 | polygon-edge secrets init --config 85 | ``` 86 | 87 | The `PATH` param is the location of the previously generated secrets manager param from step 1. 88 | 89 | ## Step 3 - Generate the genesis file 90 | 91 | The genesis file should be generated in a similar manner to the [**Local Setup**](/docs/get-started/set-up-ibft-locally) 92 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) guides, with minor changes. 93 | 94 | Since GCP SM is being used instead of the local file system, validator addresses should be added through the `--ibft-validator` flag: 95 | ```bash 96 | polygon-edge genesis --ibft-validator ... 97 | ``` 98 | 99 | ## Step 4 - Start the Polygon Edge client 100 | 101 | Now that the keys are set up, and the genesis file is generated, the final step to this process would be starting the 102 | Polygon Edge with the `server` command. 
103 | 104 | The `server` command is used in the same manner as in the previously mentioned guides, with a minor addition - the `--secrets-config` flag: 105 | ```bash 106 | polygon-edge server --secrets-config ... 107 | ``` 108 | 109 | The `PATH` param is the location of the previously generated secrets manager param from step 1. -------------------------------------------------------------------------------- /docs/configuration/secret-managers/set-up-hashicorp-vault.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: set-up-hashicorp-vault 3 | title: Set up Hashicorp Vault 4 | --- 5 | 6 | ## Overview 7 | 8 | Currently, the Polygon Edge is concerned with keeping 2 major runtime secrets: 9 | * The **validator private key** used by the node, if the node is a validator 10 | * The **networking private key** used by libp2p, for participating and communicating with other peers 11 | 12 | For additional information, please read through the [Managing Private Keys Guide](/docs/configuration/manage-private-keys) 13 | 14 | The modules of the Polygon Edge **should not need to know how to keep secrets**. Ultimately, a module should not care if 15 | a secret is stored on a far-away server or locally on the node's disk. 16 | 17 | Everything a module needs to know about secret-keeping is **knowing to use the secret**, **knowing which secrets to get 18 | or save**. The finer implementation details of these operations are delegated away to the `SecretsManager`, which of course is an abstraction. 19 | 20 | The node operator that's starting the Polygon Edge can now specify which secrets manager they want to use, and as soon 21 | as the correct secrets manager is instantiated, the modules deal with the secrets through the mentioned interface - 22 | without caring if the secrets are stored on a disk or on a server. 
23 | 24 | This article details the necessary steps to get the Polygon Edge up and running with a [Hashicorp Vault](https://www.vaultproject.io/) server. 25 | 26 | :::info previous guides 27 | It is **highly recommended** that before going through this article, articles on [**Local Setup**](/docs/get-started/set-up-ibft-locally) 28 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) are read. 29 | ::: 30 | 31 | 32 | ## Prerequisites 33 | 34 | This article assumes that a functioning instance of the Hashicorp Vault server **is already set up**. 35 | 36 | Additionally, it is required that the Hashicorp Vault server being used for the Polygon Edge should have **enabled KV storage**. 37 | 38 | Required information before continuing: 39 | * **The server URL** (the API URL of the Hashicorp Vault server) 40 | * **Token** (access token used for access to the KV storage engine) 41 | 42 | ## Step 1 - Generate the secrets manager configuration 43 | 44 | In order for the Polygon Edge to be able to seamlessly communicate with the Vault server, it needs to parse an already 45 | generated config file, which contains all the necessary information for secret storage on Vault. 46 | 47 | To generate the configuration, run the following command: 48 | 49 | ```bash 50 | polygon-edge secrets generate --dir --token --server-url --name 51 | ``` 52 | 53 | Parameters present: 54 | * `PATH` is the path to which the configuration file should be exported to. Default `./secretsManagerConfig.json` 55 | * `TOKEN` is the access token previously mentioned in the [prerequisites section](/docs/configuration/secret-managers/set-up-hashicorp-vault#prerequisites) 56 | * `SERVER_URL` is the URL of the API for the Vault server, also mentioned in the [prerequisites section](/docs/configuration/secret-managers/set-up-hashicorp-vault#prerequisites) 57 | * `NODE_NAME` is the name of the current node for which the Vault configuration is being set up as. It can be an arbitrary value. 
Default `polygon-edge-node` 58 | 59 | :::caution Node names 60 | Be careful when specifying node names. 61 | 62 | The Polygon Edge uses the specified node name to keep track of the secrets it generates and uses on the Vault instance. 63 | Specifying an existing node name can have consequences of data being overwritten on the Vault server. 64 | 65 | Secrets are stored on the following base path: `secrets/node_name` 66 | ::: 67 | 68 | ## Step 2 - Initialize secret keys using the configuration 69 | 70 | Now that the configuration file is present, we can initialize the required secret keys with the configuration 71 | file set up in step 1, using the `--config`: 72 | 73 | ```bash 74 | polygon-edge secrets init --config 75 | ``` 76 | 77 | The `PATH` param is the location of the previously generated secrets manager param from step 1. 78 | 79 | ## Step 3 - Generate the genesis file 80 | 81 | The genesis file should be generated in a similar manner to the [**Local Setup**](/docs/get-started/set-up-ibft-locally) 82 | and [**Cloud Setup**](/docs/get-started/set-up-ibft-on-the-cloud) guides, with minor changes. 83 | 84 | Since Hashicorp Vault is being used instead of the local file system, validator addresses should be added through the `--ibft-validator` flag: 85 | ```bash 86 | polygon-edge genesis --ibft-validator ... 87 | ``` 88 | 89 | ## Step 4 - Start the Polygon Edge client 90 | 91 | Now that the keys are set up, and the genesis file is generated, the final step to this process would be starting the 92 | Polygon Edge with the `server` command. 93 | 94 | The `server` command is used in the same manner as in the previously mentioned guides, with a minor addition - the `--secrets-config` flag: 95 | ```bash 96 | polygon-edge server --secrets-config ... 97 | ``` 98 | 99 | The `PATH` param is the location of the previously generated secrets manager param from step 1. 
-------------------------------------------------------------------------------- /docs/consensus/migration-to-pos.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: migration-to-pos 3 | title: Migration from PoA to PoS 4 | --- 5 | 6 | ## Overview 7 | 8 | This section guides you through the migration from PoA to PoS IBFT modes, and vice versa, for a running cluster - without the need to reset the blockchain. 9 | 10 | ## How to migrate to PoS 11 | 12 | You will need to stop all nodes, add the fork configuration into genesis.json with the `ibft switch` command, and restart the nodes. 13 | 14 | ````bash 15 | polygon-edge ibft switch --chain ./genesis.json --type PoS --deployment 100 --from 200 16 | ```` 17 | 18 | To switch to PoS, you will need to specify 2 block heights: `deployment` and `from`. `deployment` is the height to deploy the staking contract and `from` is the height of the beginning of PoS. The staking contract will be deployed at the address `0x0000000000000000000000000000000000001001` at `deployment`, as in the case of a pre-deployed contract. 19 | 20 | Please check [Proof of Stake](/docs/consensus/pos-concepts) for more details about the Staking contract. 21 | 22 | :::warning Validators need to stake manually 23 | Each validator needs to stake after the contract is deployed at `deployment` and before `from` in order to be a validator at the beginning of PoS. Each validator will update its own validator set from the set in the staking contract at the beginning of PoS. 24 | 25 | To find out more about Staking, visit [Set up and use Proof of Stake](/docs/consensus/pos-stake-unstake). 
26 | ::: 27 | -------------------------------------------------------------------------------- /docs/consensus/poa.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: poa 3 | title: Proof of Authority (PoA) 4 | --- 5 | 6 | ## Overview 7 | 8 | The IBFT PoA is the default consensus mechanism in the Polygon Edge. In PoA, validators are the ones responsible for creating the blocks and adding them to the blockchain in a series. 9 | 10 | All of the validators make up a dynamic validator-set, where validators can be added to or removed from the set by employing a voting mechanism. This means that validators can be voted in/out from the validator-set if the majority (51%) of the validator nodes vote to add/drop a certain validator to/from the set. In this way, malicious validators can be recognized and removed from the network, while new trusted validators can be added to the network. 11 | 12 | All of the validators take turns in proposing the next block (round-robin), and for the block to be validated/inserted in the blockchain, a supermajority (more than 2/3) of the validators must approve the said block. 13 | 14 | Besides validators, there are non-validators who do not participate in the block creation but do participate in the block validation process. 15 | 16 | ## Adding a validator to the validator-set 17 | 18 | This guide describes how to add a new validator node to an active IBFT network with 4 validator nodes. 19 | If you need help setting up the network refer to the [Local Setup](/docs/get-started/set-up-ibft-locally) / [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) sections. 
20 | 21 | ### Step 1: Initialize data folders for IBFT and generate validator keys for the new node 22 | 23 | In order to get up and running with IBFT on the new node, you first need to initialize the data folders and generate the keys: 24 | 25 | ````bash 26 | polygon-edge secrets init --data-dir test-chain-5 27 | ```` 28 | 29 | This command will print the validator key (address) and the node ID. You will need the validator key (address) for the next step. 30 | 31 | ### Step 2: Propose a new candidate from other validator nodes 32 | 33 | For a new node to become a validator, at least 51% of validators need to propose it. 34 | 35 | Example of how to propose a new validator (`0x8B15464F8233F718c8605B16eBADA6fc09181fC2`) from the existing validator node on grpc address: 127.0.0.1:10000: 36 | 37 | ````bash 38 | polygon-edge ibft propose --grpc-address 127.0.0.1:10000 --addr 0x8B15464F8233F718c8605B16eBADA6fc09181fC2 --vote auth 39 | ```` 40 | 41 | The structure of the IBFT commands is covered in the [CLI Commands](/docs/get-started/cli-commands) section. 42 | 43 | ### Step 3: Run the client node 44 | 45 | Because in this example we are attempting to run the network where all nodes are on the same machine, we need to take care to avoid port conflicts. 
46 | 47 | ````bash 48 | polygon-edge server --data-dir ./test-chain-5 --chain genesis.json --grpc-address :50000 --libp2p :50001 --jsonrpc :50002 --seal 49 | ```` 50 | 51 | After fetching all blocks, inside your console you will notice that a new node is participating in the validation 52 | 53 | ````bash 54 | 2021-12-01T14:56:48.369+0100 [INFO] polygon.consensus.ibft.acceptState: Accept state: sequence=4004 55 | 2021-12-01T14:56:48.369+0100 [INFO] polygon.consensus.ibft: current snapshot: validators=5 votes=0 56 | 2021-12-01T14:56:48.369+0100 [INFO] polygon.consensus.ibft: proposer calculated: proposer=0x8B15464F8233F718c8605B16eBADA6fc09181fC2 block=4004 57 | ```` 58 | 59 | :::info Promoting a non-validator to a validator 60 | Naturally, a non-validator can become a validator by the voting process, but for it to be successfully included in the validator-set after the voting process, the node has to be restarted with the `--seal` flag. 61 | ::: 62 | 63 | ## Removing a validator from the validator-set 64 | 65 | This operation is fairly simple. To remove a validator node from the validator-set, this command needs to be performed for the majority of the validator nodes. 66 | 67 | ````bash 68 | polygon-edge ibft propose --grpc-address 127.0.0.1:10000 --addr 0x8B15464F8233F718c8605B16eBADA6fc09181fC2 --vote drop 69 | ```` 70 | 71 | After the commands are performed, observe that the number of validators has dropped (in this log example from 4 to 3). 
72 | 73 | ````bash 74 | 2021-12-15T19:20:51.014+0100 [INFO] polygon.consensus.ibft.acceptState: Accept state: sequence=2399 round=1 75 | 2021-12-15T19:20:51.014+0100 [INFO] polygon.consensus.ibft: current snapshot: validators=4 votes=2 76 | 2021-12-15T19:20:51.015+0100 [INFO] polygon.consensus.ibft.acceptState: we are the proposer: block=2399 77 | 2021-12-15T19:20:51.015+0100 [INFO] polygon.consensus.ibft: picked out txns from pool: num=0 remaining=0 78 | 2021-12-15T19:20:51.015+0100 [INFO] polygon.consensus.ibft: build block: number=2399 txns=0 79 | 2021-12-15T19:20:53.002+0100 [INFO] polygon.consensus.ibft: state change: new=ValidateState 80 | 2021-12-15T19:20:53.009+0100 [INFO] polygon.consensus.ibft: state change: new=CommitState 81 | 2021-12-15T19:20:53.009+0100 [INFO] polygon.blockchain: write block: num=2399 parent=0x768b3bdf26cdc770525e0be549b1fddb3e389429e2d302cb52af1722f85f798c 82 | 2021-12-15T19:20:53.011+0100 [INFO] polygon.blockchain: new block: number=2399 hash=0x6538286881d32dc7722dd9f64b71ec85693ee9576e8a2613987c4d0ab9d83590 txns=0 generation_time_in_sec=2 83 | 2021-12-15T19:20:53.011+0100 [INFO] polygon.blockchain: new head: hash=0x6538286881d32dc7722dd9f64b71ec85693ee9576e8a2613987c4d0ab9d83590 number=2399 84 | 2021-12-15T19:20:53.011+0100 [INFO] polygon.consensus.ibft: block committed: sequence=2399 hash=0x6538286881d32dc7722dd9f64b71ec85693ee9576e8a2613987c4d0ab9d83590 validators=4 rounds=1 committed=3 85 | 2021-12-15T19:20:53.012+0100 [INFO] polygon.consensus.ibft: state change: new=AcceptState 86 | 2021-12-15T19:20:53.012+0100 [INFO] polygon.consensus.ibft.acceptState: Accept state: sequence=2400 round=1 87 | 2021-12-15T19:20:53.012+0100 [INFO] polygon.consensus.ibft: current snapshot: validators=3 votes=0 88 | 2021-12-15T19:20:53.012+0100 [INFO] polygon.consensus.ibft: proposer calculated: proposer=0xea21efC826F4f3Cb5cFc0f986A4d69C095c2838b block=2400 89 | ```` -------------------------------------------------------------------------------- 
/docs/consensus/pos-concepts.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: pos-concepts 3 | title: Proof of Stake 4 | --- 5 | 6 | ## Overview 7 | 8 | This section aims to give a better overview of some concepts currently present in the Proof of Stake (PoS) 9 | implementation of the Polygon Edge. 10 | 11 | The Polygon Edge Proof of Stake (PoS) implementation is meant to be an alternative to the existing PoA IBFT implementation, 12 | giving node operators the ability to easily choose between the two when starting a chain. 13 | 14 | ## PoS Features 15 | 16 | The core logic behind the Proof of Stake implementation is situated within 17 | the [Staking Smart Contract](https://github.com/0xPolygon/staking-contracts/blob/main/contracts/staking.sol). 18 | 19 | This contract is pre-deployed whenever a PoS mechanism Polygon Edge chain is initialized, and is available on the address 20 | `0x0000000000000000000000000000000000001001` from block `0`. 21 | 22 | ### Epochs 23 | 24 | Epochs are a concept introduced with the addition of PoS to the Polygon Edge. 25 | 26 | Epochs are considered to be a special time frame (in blocks) in which a certain set of validators can produce blocks. 27 | Their lengths are modifiable, meaning node operators can configure the length of an epoch during genesis generation. 28 | 29 | At the end of each epoch, an _epoch block_ is created, and after that event a new epoch starts. To learn more about 30 | epoch blocks, see the [Epoch Blocks](/docs/consensus/pos-concepts#epoch-blocks) section. 31 | 32 | Validator sets are updated at the end of each epoch. Nodes query the validator set from the Staking Smart Contract 33 | during the creation of the epoch block, and save the obtained data to local storage. This query and save cycle is 34 | repeated at the end of each epoch. 
35 | 36 | Essentially, it ensures that the Staking Smart Contract has full control over the addresses in the validator set, and 37 | leaves nodes with only 1 responsibility - to query the contract once during an epoch for fetching the latest validator 38 | set information. This alleviates the responsibility from individual nodes from taking care of validator sets. 39 | 40 | ### Staking 41 | 42 | Addresses can stake funds on the Staking Smart Contract by invoking the `stake` method, and by specifying a value for 43 | the staked amount in the transaction: 44 | 45 | ````js 46 | const StakingContractFactory = await ethers.getContractFactory("Staking"); 47 | let stakingContract = await StakingContractFactory.attach(STAKING_CONTRACT_ADDRESS) 48 | as 49 | Staking; 50 | stakingContract = stakingContract.connect(account); 51 | 52 | const tx = await stakingContract.stake({value: STAKE_AMOUNT}) 53 | ```` 54 | 55 | By staking funds on the Staking Smart Contract, addresses can enter the validator set and thus be able to participate in 56 | the block production process. 57 | 58 | :::info Threshold for staking 59 | Currently, the minimum threshold for entering the validator set is staking `1 ETH` 60 | ::: 61 | 62 | ### Unstaking 63 | 64 | Addresses that have staked funds can only **unstake all of their staked funds at once**. 65 | 66 | Unstaking can be invoked by calling the `unstake` method on the Staking Smart Contract: 67 | 68 | ````js 69 | const StakingContractFactory = await ethers.getContractFactory("Staking"); 70 | let stakingContract = await StakingContractFactory.attach(STAKING_CONTRACT_ADDRESS) 71 | as 72 | Staking; 73 | stakingContract = stakingContract.connect(account); 74 | 75 | const tx = await stakingContract.unstake() 76 | ```` 77 | 78 | After unstaking their funds, addresses are removed from the validator set on the Staking Smart Contract, and will not be 79 | considered validators during the next epoch. 
80 | 81 | ## Epoch Blocks 82 | 83 | **Epoch Blocks** are a concept introduced in the PoS implementation of IBFT in Polygon Edge. 84 | 85 | Essentially, epoch blocks are special blocks that contain **no transactions** and occur only at **the end of an epoch**. 86 | For example, if the **epoch size** is set to `50` blocks, epoch blocks would be considered to be blocks `50`, `100` 87 | , `150` and so on. 88 | 89 | They are used to performing additional logic that shouldn't occur during regular block production. 90 | 91 | Most importantly, they are an indication to the node that **it needs to fetch the latest validator set** information 92 | from the Staking Smart Contract. 93 | 94 | After updating the validator set at the epoch block, the validator set (either changed or unchanged) 95 | is used for the subsequent `epochSize - 1` blocks, until it gets updated again by pulling the latest information from 96 | the Staking Smart Contract. 97 | 98 | Epoch lengths (in blocks) are modifiable when generating the genesis file, by using a special flag `--epoch-size`: 99 | 100 | ```bash 101 | polygon-edge genesis --epoch-size 50 ... 102 | ``` 103 | 104 | The default size of an epoch is `100000` blocks in the Polygon Edge. 105 | 106 | ## Contract pre-deployment 107 | 108 | The Polygon Edge _pre-deploys_ 109 | the [Staking Smart Contract](https://github.com/0xPolygon/staking-contracts/blob/main/contracts/Staking.sol) 110 | during **genesis generation** to the address `0x0000000000000000000000000000000000001001`. 111 | 112 | It does so without a running EVM, by modifying the blockchain state of the Smart Contract directly, using the passed in 113 | configuration values to the genesis command. 
114 | -------------------------------------------------------------------------------- /docs/consensus/pos-stake-unstake.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: pos-stake-unstake 3 | title: Set up and use Proof of Stake (PoS) 4 | --- 5 | 6 | ## Overview 7 | 8 | This guide goes into detail on how to set up a Proof of Stake network with the Polygon Edge, how to stake funds for nodes 9 | to become validators and how to unstake funds. 10 | 11 | It **highly encouraged** to read and go through 12 | the [Local Setup](/docs/get-started/set-up-ibft-locally) 13 | / [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) sections, before going along 14 | with this PoS guide. These sections outline the steps needed to start a Proof of Authority (PoA) cluster with the 15 | Polygon Edge. 16 | 17 | Currently, there is no limit to the number of validators that can stake funds on the Staking Smart Contract. 18 | 19 | ## Staking Smart Contract 20 | 21 | The repo for the Staking Smart Contract is located [here](https://github.com/0xPolygon/staking-contracts). 22 | 23 | It holds the necessary testing scripts, ABI files and most importantly the Staking Smart Contract itself. 24 | 25 | ## Setting up an N node cluster 26 | 27 | Setting up a network with the Polygon Edge is covered in 28 | the [Local Setup](/docs/get-started/set-up-ibft-locally) 29 | / [Cloud Setup](/docs/get-started/set-up-ibft-on-the-cloud) sections. 30 | 31 | The **only difference** between setting up a PoS and PoA cluster is in the genesis generation part. 32 | 33 | **When generating the genesis file for a PoS cluster, an additional flag is needed `--pos`**: 34 | 35 | ```bash 36 | polygon-edge genesis --pos ... 37 | ``` 38 | 39 | ## Setting the length of an epoch 40 | 41 | Epochs are covered in detail in the [Epoch Blocks](/docs/consensus/pos-concepts#epoch-blocks) section. 
42 | 43 | To set the size of an epoch for a cluster (in blocks), when generating the genesis file, an additional flag is 44 | specified `--epoch-size`: 45 | 46 | ```bash 47 | polygon-edge genesis --epoch-size 50 48 | ``` 49 | 50 | This value specified in the genesis file that the epoch size should be `50` blocks. 51 | 52 | The default value for the size of an epoch (in blocks) is `100000`. 53 | 54 | :::info Lowering the epoch length 55 | As outlined in the [Epoch Blocks](/docs/consensus/pos-concepts#epoch-blocks) section, 56 | epoch blocks are used to update the validator sets for nodes. 57 | 58 | The default epoch length in blocks (`100000`) may be a long time to way for validator set updates. Considering that new 59 | blocks are added ~2s, it would take ~55.5h for the validator set to possibly change. 60 | 61 | Setting a lower value for the epoch length ensures that the validator set is updated more frequently. 62 | ::: 63 | 64 | ## Using the Staking Smart Contract scripts 65 | 66 | ### Prerequisites 67 | 68 | The Staking Smart Contract repo is a Hardhat project, which requires NPM. 69 | 70 | To initialize it correctly, in the main directory run: 71 | 72 | ```bash 73 | npm install 74 | ```` 75 | 76 | ### Setting up the provided helper scripts 77 | 78 | Scripts for interacting with the deployed Staking Smart Contract are located on 79 | the [Staking Smart Contract repo](https://github.com/0xPolygon/staking-contracts). 
80 | 81 | Create an `.env` file with the following parameters in the Smart Contracts repo location: 82 | 83 | ```bash 84 | JSONRPC_URL=http://localhost:10002 85 | PRIVATE_KEYS=0x0454f3ec51e7d6971fc345998bb2ba483a8d9d30d46ad890434e6f88ecb97544 86 | STAKING_CONTRACT_ADDRESS=0x0000000000000000000000000000000000001001 87 | ``` 88 | 89 | Where the parameters are: 90 | 91 | * **JSONRPC_URL** - the JSON-RPC endpoint for the running node 92 | * **PRIVATE_KEYS** - private keys of the staker address 93 | * **STAKING_CONTRACT_ADDRESS** - the address of the staking smart contract ( 94 | default `0x0000000000000000000000000000000000001001`) 95 | 96 | ### Staking funds 97 | 98 | :::info Staking address 99 | The Staking Smart Contract is pre-deployed at 100 | address `0x0000000000000000000000000000000000001001`. 101 | 102 | Any kind of interaction with the staking mechanism is done through the Staking Smart Contract at the specified address. 103 | 104 | To learn more about the Staking Smart Contract, please visit 105 | the [Staking Smart Contract](/docs/consensus/pos-concepts#contract-pre-deployment) 106 | section. 107 | ::: 108 | 109 | In order to become part of the validator set, an address needs to stake a certain amount of funds above a threshold. 110 | 111 | Currently, the default threshold for becoming part of the validator set is `1 ETH`. 112 | 113 | Staking can be initiated by calling the `stake` method of the Staking Smart Contract, and specifying a value `>= 1 ETH`. 114 | 115 | After the `.env` file mentioned in 116 | the [previous section](/docs/consensus/pos-stake-unstake#setting-up-the-provided-helper-scripts) has been set up, and a 117 | chain has been started in PoS mode, staking can be done with the following command in the Staking Smart Contract repo: 118 | 119 | ```bash 120 | npm run stake 121 | ``` 122 | 123 | The `stake` Hardhat script stakes a default amount of `1 ETH`, which can be changed by modifying the `scripts/stake.ts` 124 | file. 
125 | 126 | If the funds being staked are `>= 1 ETH`, the validator set on the Staking Smart Contract is updated, and the address 127 | will be part of the validator set starting from the next epoch. 128 | 129 | ### Unstaking funds 130 | 131 | Addresses that have a stake can **only unstake all of their funds** at once. 132 | 133 | After the `.env` file mentioned in 134 | the [previous section](/docs/consensus/pos-stake-unstake#setting-up-the-provided-helper-scripts) 135 | has been set up, and a chain has been started in PoS mode, unstaking can be done with the following command in the 136 | Staking Smart Contract repo: 137 | 138 | ```bash 139 | npm run unstake 140 | ``` 141 | 142 | ### Fetching the list of stakers 143 | 144 | All addresses that stake funds are saved to the Staking Smart Contract. 145 | 146 | After the `.env` file mentioned in 147 | the [previous section](/docs/consensus/pos-stake-unstake#setting-up-the-provided-helper-scripts) 148 | has been set up, and a chain has been started in PoS mode, fetching the list of validators can be done with the 149 | following command in the Staking Smart Contract repo: 150 | 151 | ```bash 152 | npm run info 153 | ``` 154 | -------------------------------------------------------------------------------- /docs/get-started/installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: installation 3 | title: Installation 4 | --- 5 | 6 | Please refer to the installation method more applicable to you. 7 | 8 | Our recommendation is to use the pre-built releases and verify the provided checksums. 9 | 10 | ## Pre-built releases 11 | 12 | Please refer to the [GitHub Releases](https://github.com/0xPolygon/polygon-edge/releases) page for a list of releases. 13 | 14 | Polygon Edge comes with cross-compiled AMD64/ARM64 binaries for Darwin and Linux. 
15 | 16 | --- 17 | 18 | ## Docker image 19 | 20 | Official Docker images are hosted under the [hub.docker.com registry](https://hub.docker.com/r/0xpolygon/polygon-edge). 21 | 22 | `docker pull 0xpolygon/polygon-edge:latest` 23 | 24 | --- 25 | 26 | ## Building from source 27 | 28 | Prior to using `go install` make sure that you have Go `>=1.17` installed and properly configured. 29 | 30 | The stable branch is `develop`. 31 | 32 | ```shell 33 | git clone https://github.com/0xPolygon/polygon-edge.git 34 | cd polygon-edge/ 35 | go build main.go -o polygon-edge 36 | sudo mv polygon-edge /usr/local/bin 37 | ``` 38 | 39 | --- 40 | 41 | ## Using `go install` 42 | 43 | Prior to using `go install` make sure that you have Go `>=1.17` installed and properly configured. 44 | 45 | `go install github.com/0xPolygon/polygon-edge@develop` 46 | 47 | The binary will be available in your `GOBIN` environment variable, and will include the latest changes from the mainline `develop` branch. 48 | -------------------------------------------------------------------------------- /docs/overview.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: overview 3 | title: Polygon Edge 4 | sidebar_label: Overview 5 | --- 6 | 7 | Polygon Edge is a modular and extensible framework for building Ethereum-compatible blockchain networks, sidechains, and general scaling solutions. 8 | 9 | Its primary use is to bootstrap a new blockchain network while providing full compatibility with Ethereum smart contracts and transactions. It uses IBFT (Istanbul Byzantine Fault Tolerant) consensus mechanism, supported in two flavours as [PoA (proof of authority)](/docs/consensus/poa) and [PoS (proof of stake)](/docs/consensus/pos-stake-unstake). 
10 | 11 | Polygon Edge also supports communication with multiple blockchain networks, enabling transfers of both [ERC-20](https://ethereum.org/en/developers/docs/standards/tokens/erc-20) and [ERC-721](https://ethereum.org/en/developers/docs/standards/tokens/erc-721) tokens, by utilising the [centralised bridge solution](/docs/additional-features/chainbridge/overview). 12 | 13 | Industry standard wallets can be used to interact with Polygon Edge through the [JSON-RPC](/docs/working-with-node/query-json-rpc) endpoints and node operators can perform various actions on the nodes through the [gRPC](/docs/working-with-node/query-operator-info) protocol. 14 | 15 | To find out more about Polygon, visit the [official website](https://polygon.technology). 16 | 17 | **[GitHub repository](https://github.com/0xPolygon/polygon-edge)** 18 | 19 | :::caution 20 | 21 | This is a work in progress so architectural changes may happen in the future. The code has not been audited 22 | yet, so please contact the Polygon team if you would like to use it in production. 23 | 24 | ::: 25 | 26 | 27 | 28 | To get started by running a `polygon-edge` network locally, please read: [Installation](/docs/get-started/installation) and [Local Setup](/docs/get-started/set-up-ibft-locally). 
29 | -------------------------------------------------------------------------------- /docs/performance-reports/overview.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: overview 3 | title: Overview 4 | --- 5 | 6 | | Type | Value | Link to test | 7 | | ---- | ----- | ------------ | 8 | | Regular Transfers | 689 tps | [March 23rd 2022](test-history/test-2022-03-23.md) | 9 | | ERC-20 Transfers | 500 tps | [March 23rd 2022](test-history/test-2022-03-23.md) | 10 | | NFT Minting | 487 tps | [March 23rd 2022](test-history/test-2022-03-23.md) | 11 | 12 | --- 13 | 14 | Our goal is to make a highly-performant, feature-rich and easy to setup and maintain blockchain client software. 15 | All tests were done using the [Polygon Edge Loadbot](../additional-features/stress-testing.md). 16 | Every performance report you will find in this section is properly dated, environment clearly described and the testing method clearly explained. 17 | 18 | The goal of these performance tests is to show a real world performance of Polygon Edge blockchain network. 19 | Anyone should be able to get the same results as posted here, on the same environment, using our loadbot. 20 | 21 | All of the performance tests were conducted on the AWS platform on a chain consisting of EC2 instance nodes. -------------------------------------------------------------------------------- /docs/performance-reports/test-history/test-2022-01-21.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: test-2022-01-21 3 | title: January 21st 2022 4 | --- 5 | 6 | ## January 21st 2022 7 | 8 | ### Summary 9 | 10 | This test was done after the TxPool refactor which significantly improved performance (released in [v0.2.0](https://github.com/0xPolygon/polygon-edge/releases/v0.2.0)). 
The goal was to set up a large network
30 | Host Configuration 31 |
32 |
33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 |
Cloud providerAWS
Instance sizet2.xlarge
Networkingprivate subnet
Operating systemLinux Ubuntu 20.04 LTS - Focal Fossa
File descriptor limit65535
Max user processes65535
59 |
60 |
61 |
62 |
63 | 64 |
65 | Blockchain Configuration 66 |
67 |
68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 |
Polygon Edge versionCommit 8377162281d1a2e4342ae27cd4e5367c4364aee2 on develop branch
Validator nodes30
Non-validator nodes0
ConsensusIBFT PoA
Block time2000ms
Block gas limit5242880
94 |
95 |
96 |
97 |
98 | 99 |
100 | Loadbot Configuration 101 |
102 |
103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 |
Total Transactions10000
Transactions sent per second400
Type of transactionsEOA to EOA transfers
117 |
118 |
119 |
120 |
121 | -------------------------------------------------------------------------------- /docs/working-with-node/backup-restore.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: backup-restore 3 | title: Backup/restore node instance 4 | --- 5 | 6 | ## Overview 7 | 8 | This guide goes into detail on how to back up and restore a Polygon Edge node instance. 9 | It covers the base folders and what they contain, as well as which files are critical for performing a successful backup and restore. 10 | 11 | ## Base folders 12 | 13 | Polygon Edge leverages LevelDB as its storage engine. 14 | When starting a Polygon Edge node, the following sub-folders are created in the specified working directory: 15 | * **blockchain** - Stores the blockchain data 16 | * **trie** - Stores the Merkle tries (world state data) 17 | * **keystore** - Stores private keys for the client. This includes the libp2p private key and the sealing/validator private key 18 | * **consensus** - Stores any consensus information that the client might need while working. For now, it stores the node's *private validator key* 19 | 20 | It is critical for these folders to be preserved in order for the Polygon Edge instance to run smoothly. 21 | 22 | ## Create backup from a running node and restore for new node 23 | 24 | This section guides you through creating archive data of the blockchain in a running node and restoring it in another instance. 25 | 26 | ### Backup 27 | 28 | `backup` command fetches blocks from a running node by gRPC and generates an archive file. If `--from` and `--to` are not given in the command, this command will fetch blocks from genesis to latest. 29 | 30 | ```bash 31 | $ polygon-edge backup --grpc-address 127.0.0.1:9632 --out backup.dat [--from 0x0] [--to 0x100] 32 | ``` 33 | 34 | ### Restore 35 | 36 | A server imports blocks from an archive at the start when starting with `--restore` flag. Please make sure that there is a key for new node. 
To find out more about importing or generating keys, visit the [Secret Managers section](/docs/configuration/secret-managers/set-up-aws-ssm). 37 | 38 | ```bash 39 | $ polygon-edge server --restore archive.dat 40 | ``` 41 | 42 | ## Back up/Restore Whole data 43 | 44 | This section guides you through backup the data including state data and key and restoring into the new instance. 45 | 46 | ### Step 1: Stop the running client 47 | 48 | Since the Polygon Edge uses **LevelDB** for data storage, the node needs to be stopped for the duration of the backup, 49 | as **LevelDB** doesn't allow for concurrent access to its database files. 50 | 51 | Additionally, the Polygon Edge also does data flushing on close. 52 | 53 | The first step involves stopping the running client (either through a service manager or some other mechanism that sends a SIGINT signal to the process), 54 | so it can trigger 2 events while gracefully shutting down: 55 | * Running data flush to disk 56 | * Release of the DB files lock by LevelDB 57 | 58 | ### Step 2: Backup the directory 59 | 60 | Now that the client is not running, the data directory can be backed up to another medium. 61 | Keep in mind that the files with a `.key` extension contain the private key data that can be used to impersonate the current node, 62 | and they should never be shared with a third/unknown party. 63 | 64 | :::info 65 | Please back up and restore the generated `genesis` file manually, so the restored node is fully operational. 66 | ::: 67 | 68 | ## Restore 69 | 70 | ### Step 1: Stop the running client 71 | 72 | If any instance of the Polygon Edge is running, it needs to be stopped in order for step 2 to be successful. 73 | 74 | ### Step 2: Copy the backed up data directory to the desired folder 75 | 76 | Once the client is not running, the data directory which was previously backed up can be copied over to the desired folder. 77 | Additionally, restore the previously copied `genesis` file. 
78 | 79 | ### Step 3: Run the Polygon Edge client while specifying the correct data directory 80 | 81 | In order for the Polygon Edge to use the restored data directory, at launch, the user needs to specify the path to the 82 | data directory. Please consult the [CLI Commands](/docs/get-started/cli-commands) section on information regarding the `data-dir` flag. 83 | -------------------------------------------------------------------------------- /docs/working-with-node/query-json-rpc.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: query-json-rpc 3 | title: Query JSON RPC endpoints 4 | --- 5 | 6 | ## Overview 7 | 8 | The JSON-RPC layer of the Polygon Edge provides developers with the functionality of easily interacting with the blockchain, 9 | through HTTP requests. 10 | 11 | This example covers using tools like **curl** to query information, as well as starting the chain with a premined account, 12 | and sending a transaction. 13 | 14 | ## Step 1: Create a genesis file with a premined account 15 | 16 | To generate a genesis file, run the following command: 17 | ````bash 18 | polygon-edge genesis --premine 0x1010101010101010101010101010101010101010 19 | ```` 20 | 21 | The **premine** flag sets the address that should be included with a starting balance in the **genesis** file.
22 | In this case, the address `0x1010101010101010101010101010101010101010` will have a starting **default balance** of 23 | `0x3635C9ADC5DEA00000 wei`. 24 | 25 | If we wanted to specify a balance, we can separate out the balance and address with a `:`, like so: 26 | ````bash 27 | polygon-edge genesis --premine 0x1010101010101010101010101010101010101010:0x123123 28 | ```` 29 | 30 | The balance can be either a `hex` or `uint256` value. 31 | 32 | :::warning Only premine accounts for which you have a private key! 33 | If you premine accounts and do not have a private key to access them, you premined balance will not be usable 34 | ::: 35 | 36 | ## Step 2: Start the Polygon Edge in dev mode 37 | 38 | To start the Polygon Edge in development mode, which is explained in the [CLI Commands](/docs/get-started/cli-commands) section, 39 | run the following: 40 | ````bash 41 | polygon-edge server --chain genesis.json --dev --log-level debug 42 | ```` 43 | 44 | ## Step 3: Query the account balance 45 | 46 | Now that the client is up and running in dev mode, using the genesis file generated in **step 1**, we can use a tool like 47 | **curl** to query the account balance: 48 | ````bash 49 | curl -X POST --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x1010101010101010101010101010101010101010", "latest"],"id":1}' localhost:8545 50 | ```` 51 | 52 | The command should return the following output: 53 | ````bash 54 | { 55 | "id":1, 56 | "result":"0x100000000000000000000000000" 57 | } 58 | ```` 59 | 60 | ## Step 4: Send a transfer transaction 61 | 62 | Now that we've confirmed the account we set up as premined has the correct balance, we can transfer some ether: 63 | 64 | ````js 65 | var Web3 = require("web3"); 66 | 67 | const web3 = new Web3(""); //example: ws://localhost:10002/ws 68 | web3.eth.accounts 69 | .signTransaction( 70 | { 71 | to: "", 72 | value: web3.utils.toWei(""), 73 | gas: 21000, 74 | }, 75 | "" 76 | ) 77 | .then((signedTxData) => { 78 | web3.eth 79 | 
.sendSignedTransaction(signedTxData.rawTransaction) 80 | .on("receipt", console.log); 81 | }); 82 | 83 | ```` 84 | -------------------------------------------------------------------------------- /docs/working-with-node/query-operator-info.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: query-operator-info 3 | title: Query operator information 4 | --- 5 | 6 | ## Prerequisites 7 | 8 | This guide assumes you have followed the [Local Setup](/docs/get-started/set-up-ibft-locally) or [guide on how to set up an IBFT cluster on the cloud](/docs/get-started/set-up-ibft-on-the-cloud). 9 | 10 | A functioning node is required in order to query any kind of operator information. 11 | 12 | With the Polygon Edge, node operators are in control and informed about what the node they're operating is doing.
13 | At any time, they can use the node information layer, built on top of gRPC, and get meaningful information - no log sifting required. 14 | 15 | :::note 16 | 17 | If your node isn't running on `127.0.0.1:8545` you should add a flag `--grpc-address ` to the commands listed in this document. 18 | 19 | ::: 20 | 21 | ## Peer information 22 | 23 | ### Peers list 24 | 25 | To get a complete list of connected peers (including the running node itself), run the following command: 26 | ````bash 27 | polygon-edge peers list 28 | ```` 29 | 30 | This will return a list of libp2p addresses that are currently peers of the running client. 31 | 32 | ### Peer status 33 | 34 | For the status of a specific peer, run: 35 | ````bash 36 | polygon-edge peers status --peer-id
37 | ```` 38 | With the *address* parameter being the libp2p address of the peer. 39 | 40 | ## IBFT info 41 | 42 | Lots of times, an operator might want to know about the state of the operating node in IBFT consensus. 43 | 44 | Luckily, the Polygon Edge provides an easy way to find this information. 45 | 46 | ### Snapshots 47 | 48 | Running the following command returns the most recent snapshot. 49 | ````bash 50 | polygon-edge ibft snapshot 51 | ```` 52 | To query the snapshot at a specific height (block number), the operator can run: 53 | ````bash 54 | polygon-edge ibft snapshot --num 55 | ```` 56 | 57 | ### Candidates 58 | 59 | To get the latest info on candidates, the operator can run: 60 | ````bash 61 | polygon-edge ibft candidates 62 | ```` 63 | This command queries the current set of proposed candidates, as well as candidates that have not been included yet 64 | 65 | ### Status 66 | 67 | The following command returns the current validator key of the running IBFT client: 68 | ````bash 69 | polygon-edge ibft status 70 | ```` 71 | 72 | ## Transaction pool 73 | 74 | To find the current number of transactions in the transaction pool, the operator can run: 75 | ````bash 76 | polygon-edge txpool status 77 | ```` 78 | -------------------------------------------------------------------------------- /docusaurus.config.js: -------------------------------------------------------------------------------- 1 | require('dotenv').config() 2 | 3 | /** @type {import('@docusaurus/types').DocusaurusConfig} */ 4 | module.exports = { 5 | title: 'Polygon Edge', 6 | url: 'https://edge-docs.polygon.technology', 7 | baseUrl: '/', 8 | onBrokenLinks: 'throw', 9 | onBrokenMarkdownLinks: 'warn', 10 | favicon: 'img/favicon-32x32.png', 11 | organizationName: '0xPolygon', 12 | projectName: 'polygon-edge-docs', 13 | themes: ['docusaurus-theme-search-typesense'], 14 | themeConfig: { 15 | colorMode: { 16 | defaultMode: 'dark' 17 | }, 18 | prism: { 19 | defaultLanguage: 'go' 20 | }, 21 | navbar: 
{ 22 | hideOnScroll: true, 23 | title: 'Polygon Edge', 24 | logo: { 25 | alt: 'Polygon Edge Logo', 26 | src: 'img/logo.svg', 27 | href: 'docs/overview' 28 | }, 29 | items: [ 30 | { 31 | to: 'docs/overview', 32 | activeBasePath: 'docs/', 33 | label: 'Docs', 34 | position: 'left' 35 | }, 36 | { 37 | href: 'https://github.com/0xPolygon/polygon-edge', 38 | position: 'right', 39 | className: 'header-github-link', 40 | 'aria-label': 'GitHub repository', 41 | }, 42 | ] 43 | }, 44 | footer: { 45 | style: 'dark', 46 | copyright: `Made with ❤ by the humans at Trapesys` 47 | }, 48 | typesense: { 49 | typesenseCollectionName: 'sdk-docs.polygon.technology', // Replace with your own doc site's name. Should match the collection name in the scraper settings. 50 | typesenseServerConfig: { 51 | nodes: [ 52 | { 53 | host: 'fvtlbamhupdcon8rp-1.a1.typesense.net', 54 | port: 443, 55 | protocol: 'https', 56 | }, 57 | ], 58 | apiKey: process.env.TYPESENSE_API_KEY, 59 | }, 60 | }, 61 | }, 62 | presets: [ 63 | [ 64 | '@docusaurus/preset-classic', 65 | { 66 | docs: { 67 | sidebarPath: require.resolve('./sidebars.js'), 68 | showLastUpdateAuthor: false, 69 | showLastUpdateTime: false 70 | }, 71 | theme: { 72 | customCss: require.resolve('./src/css/custom.css') 73 | } 74 | } 75 | ] 76 | ] 77 | }; 78 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "polygon-edge-docs", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids", 15 | "test": "jest --noStackTrace" 16 | }, 17 | "dependencies": { 18 | 
"@docusaurus/core": "2.0.0-beta.14", 19 | "@docusaurus/preset-classic": "2.0.0-beta.14", 20 | "@mdx-js/react": "^1.6.21", 21 | "clsx": "^1.1.1", 22 | "docusaurus-theme-search-typesense": "0.3.0-1", 23 | "dotenv": "^10.0.0", 24 | "react": "^17.0.1", 25 | "react-dom": "^17.0.1" 26 | }, 27 | "browserslist": { 28 | "production": [ 29 | ">0.5%", 30 | "not dead", 31 | "not op_mini all" 32 | ], 33 | "development": [ 34 | "last 1 chrome version", 35 | "last 1 firefox version", 36 | "last 1 safari version" 37 | ] 38 | }, 39 | "devDependencies": { 40 | "@babel/preset-env": "^7.16.11", 41 | "@docusaurus/module-type-aliases": "^2.0.0-alpha.72", 42 | "@tsconfig/docusaurus": "^1.0.2", 43 | "@types/jest": "^27.4.1", 44 | "@types/react": "^17.0.3", 45 | "@types/react-helmet": "^6.1.0", 46 | "@types/react-router-dom": "^5.1.7", 47 | "jest": "^27.5.1", 48 | "typescript": "^4.2.3" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /scrapper_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "index_name": "sdk-docs.polygon.technology", 3 | "start_urls": [ 4 | "https://edge-docs.polygon.technology" 5 | ], 6 | "sitemap_urls": [ 7 | "https://edge-docs.polygon.technology/sitemap.xml" 8 | ], 9 | "stop_urls": [], 10 | "sitemap_alternate_links": true, 11 | "selectors": { 12 | "lvl0": { 13 | "selector": "(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]", 14 | "type": "xpath", 15 | "global": true, 16 | "default_value": "Documentation" 17 | }, 18 | "lvl1": "header h1", 19 | "lvl2": "article h2", 20 | "lvl3": "article h3", 21 | "lvl4": "article h4", 22 | "lvl5": "article h5, article td:first-child", 23 | "lvl6": "article h6", 24 | "text": "article p, article li, article td:last-child" 25 | }, 26 | "strip_chars": " .,;:#", 27 | 
"custom_settings": { 28 | "separatorsToIndex": "_", 29 | "attributesForFaceting": [ 30 | "language", 31 | "version", 32 | "type", 33 | "docusaurus_tag" 34 | ], 35 | "attributesToRetrieve": [ 36 | "hierarchy", 37 | "content", 38 | "anchor", 39 | "url", 40 | "url_without_anchor", 41 | "type" 42 | ] 43 | }, 44 | "conversation_id": [ 45 | "833762294" 46 | ], 47 | "nb_hits": 46250 48 | } 49 | -------------------------------------------------------------------------------- /sidebars.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | develop: [ 3 | 'overview', 4 | { 5 | type: 'category', 6 | label: 'Get started', 7 | items: [ 8 | 'get-started/installation', 9 | 'get-started/set-up-ibft-locally', 10 | 'get-started/set-up-ibft-on-the-cloud', 11 | 'get-started/cli-commands', 12 | 'get-started/json-rpc-commands', 13 | ] 14 | }, 15 | { 16 | type: 'category', 17 | label: 'Configuration', 18 | items: [ 19 | 'configuration/sample-config', 20 | 'configuration/manage-private-keys', 21 | 'configuration/enable-metrics', 22 | { 23 | type: 'category', 24 | label: 'Secret Managers', 25 | items: [ 26 | 'configuration/secret-managers/set-up-aws-ssm', 27 | 'configuration/secret-managers/set-up-gcp-secrets-manager', 28 | 'configuration/secret-managers/set-up-hashicorp-vault', 29 | ] 30 | } 31 | ] 32 | }, 33 | { 34 | type: 'category', 35 | label: 'Working with a node', 36 | items: [ 37 | 'working-with-node/query-json-rpc', 38 | 'working-with-node/query-operator-info', 39 | 'working-with-node/backup-restore', 40 | ] 41 | }, 42 | { 43 | type: 'category', 44 | label: 'Consensus', 45 | items: [ 46 | 'consensus/poa', 47 | 'consensus/pos-concepts', 48 | 'consensus/pos-stake-unstake', 49 | 'consensus/migration-to-pos' 50 | ] 51 | }, 52 | { 53 | type: 'category', 54 | label: 'Additional features', 55 | items: [ 56 | { 57 | type: 'category', 58 | label: 'Chainbridge', 59 | items: [ 60 | 'additional-features/chainbridge/overview', 61 | 
'additional-features/chainbridge/definitions', 62 | 'additional-features/chainbridge/setup', 63 | 'additional-features/chainbridge/setup-erc20-transfer', 64 | 'additional-features/chainbridge/setup-erc721-transfer', 65 | 'additional-features/chainbridge/use-case-erc20-bridge', 66 | 'additional-features/chainbridge/use-case-erc721-bridge', 67 | ] 68 | }, 69 | 'additional-features/stress-testing', 70 | 'additional-features/blockscout', 71 | ] 72 | }, 73 | { 74 | type: 'category', 75 | label: 'Architecture', 76 | items: [ 77 | 'architecture/overview', 78 | { 79 | type: 'category', 80 | label: 'Modules', 81 | items: [ 82 | 'architecture/modules/blockchain', 83 | 'architecture/modules/minimal', 84 | 'architecture/modules/networking', 85 | 'architecture/modules/state', 86 | 'architecture/modules/txpool', 87 | 'architecture/modules/json-rpc', 88 | 'architecture/modules/consensus', 89 | 'architecture/modules/storage', 90 | 'architecture/modules/types', 91 | 'architecture/modules/protocol', 92 | 'architecture/modules/sealer', 93 | 'architecture/modules/other-modules' 94 | ] 95 | } 96 | ] 97 | }, 98 | { 99 | type: 'category', 100 | label: 'Concepts', 101 | items: [ 102 | 'concepts/ethereum-state' 103 | ] 104 | }, 105 | { 106 | type: 'category', 107 | label: 'Community', 108 | items: [ 109 | 'community/propose-new-feature', 110 | 'community/report-bug' 111 | ] 112 | }, 113 | { 114 | type: 'category', 115 | label: 'Performance Reports', 116 | items: [ 117 | 'performance-reports/overview', 118 | { 119 | type: 'category', 120 | label: 'Test History', 121 | items: [ 122 | 'performance-reports/test-history/test-2022-07-04', 123 | 'performance-reports/test-history/test-2022-03-23', 124 | 'performance-reports/test-history/test-2022-03-02', 125 | 'performance-reports/test-history/test-2022-01-21', 126 | ] 127 | } 128 | ] 129 | } 130 | ] 131 | }; 132 | -------------------------------------------------------------------------------- /src/css/custom.css: 
-------------------------------------------------------------------------------- 1 | /* stylelint-disable docusaurus/copyright-header */ 2 | /** 3 | * Any CSS included here will be global. The classic template 4 | * bundles Infima by default. Infima is a CSS framework designed to 5 | * work well for content-centric websites. 6 | */ 7 | 8 | /* You can override the default Infima variables here. */ 9 | :root { 10 | --ifm-color-primary: #8247e5; 11 | --ifm-color-primary-dark: #783BE3; 12 | --ifm-color-primary-darker: #6C29E0; 13 | --ifm-color-primary-darkest: #621FD6; 14 | --ifm-color-primary-light: #915EE8; 15 | --ifm-color-primary-lighter: #9D70EB; 16 | --ifm-color-primary-lightest: #A982ED; 17 | --ifm-code-font-size: 95%; 18 | 19 | /* CAUTION */ 20 | --ifm-color-warning: #fedc86; 21 | --ifm-alert-color: black; 22 | 23 | /* DANGER */ 24 | --ifm-color-danger: #f25c54; 25 | 26 | /* SUCCESS */ 27 | --ifm-color-success: #50c467; 28 | } 29 | 30 | .docusaurus-highlight-code-line { 31 | background-color: rgb(72, 77, 91); 32 | display: block; 33 | margin: 0 calc(-1 * var(--ifm-pre-padding)); 34 | padding: 0 var(--ifm-pre-padding); 35 | } 36 | 37 | .navbar__item { 38 | font-weight: bold; 39 | } 40 | 41 | /* Default value for table content font-size is 0.8rem. 
With 0.78rem all titles in sidebar fit in one line */ 42 | .table-of-contents { 43 | font-size: 0.78rem !important; 44 | } 45 | 46 | .json_rpc_terminal { 47 | background-color: rgb(41, 45, 62); 48 | width: 100%; 49 | color: rgb(191, 199, 213); 50 | } 51 | 52 | .json_rpc_run_command_button { 53 | border: 1px solid; 54 | background-color: transparent; 55 | padding: 10px; 56 | border-radius: 5px; 57 | } 58 | 59 | .json_rpc_run_command_button:hover { 60 | color: var(--ifm-color-primary); 61 | } 62 | 63 | .header-github-link:hover { 64 | opacity: 0.6; 65 | } 66 | 67 | .header-github-link:before { 68 | content: ''; 69 | width: 24px; 70 | height: 24px; 71 | display: flex; 72 | background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat; 73 | } 74 | 75 | html[data-theme='dark'] .header-github-link:before { 76 | background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 
3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat; 77 | } -------------------------------------------------------------------------------- /src/pages/markdown-page.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Markdown page example 3 | --- 4 | 5 | # Markdown page example 6 | 7 | You don't need React to write simple standalone pages. 8 | -------------------------------------------------------------------------------- /src/pages/styles.module.css: -------------------------------------------------------------------------------- 1 | /* stylelint-disable docusaurus/copyright-header */ 2 | 3 | /** 4 | * CSS files with the .module.css suffix will be treated as CSS modules 5 | * and scoped locally. 
6 | */ 7 | 8 | .heroBanner { 9 | padding: 4rem 0; 10 | text-align: center; 11 | position: relative; 12 | overflow: hidden; 13 | } 14 | 15 | @media screen and (max-width: 966px) { 16 | .heroBanner { 17 | padding: 2rem; 18 | } 19 | } 20 | 21 | .buttons { 22 | display: flex; 23 | align-items: center; 24 | justify-content: center; 25 | } 26 | 27 | .features { 28 | display: flex; 29 | align-items: center; 30 | padding: 2rem 0; 31 | width: 100%; 32 | } 33 | 34 | .featureImage { 35 | height: 200px; 36 | width: 200px; 37 | } 38 | -------------------------------------------------------------------------------- /static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/.nojekyll -------------------------------------------------------------------------------- /static/img/Architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/Architecture.jpg -------------------------------------------------------------------------------- /static/img/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/android-chrome-192x192.png -------------------------------------------------------------------------------- /static/img/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/android-chrome-512x512.png -------------------------------------------------------------------------------- /static/img/apple-touch-icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/apple-touch-icon.png -------------------------------------------------------------------------------- /static/img/browserconfig.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | #000000 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /static/img/docusaurus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/docusaurus.png -------------------------------------------------------------------------------- /static/img/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/favicon-16x16.png -------------------------------------------------------------------------------- /static/img/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/favicon-32x32.png -------------------------------------------------------------------------------- /static/img/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /static/img/mstile-150x150.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/mstile-150x150.png 
-------------------------------------------------------------------------------- /static/img/safari-pinned-tab.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by potrace 1.11, written by Peter Selinger 2001-2013 9 | 10 | 12 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /static/img/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "short_name": "", 4 | "icons": [ 5 | { 6 | "src": "/android-chrome-192x192.png", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | }, 10 | { 11 | "src": "/android-chrome-512x512.png", 12 | "sizes": "512x512", 13 | "type": "image/png" 14 | } 15 | ], 16 | "theme_color": "#ffffff", 17 | "background_color": "#ffffff", 18 | "display": "standalone" 19 | } 20 | -------------------------------------------------------------------------------- /static/img/state/accountState.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/state/accountState.png -------------------------------------------------------------------------------- /static/img/state/block.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/state/block.png -------------------------------------------------------------------------------- /static/img/state/mainDiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/state/mainDiagram.png -------------------------------------------------------------------------------- /static/img/state/merkleTree.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/state/merkleTree.png -------------------------------------------------------------------------------- /static/img/state/worldState.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/state/worldState.png -------------------------------------------------------------------------------- /static/img/txpool-error-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/txpool-error-1.png -------------------------------------------------------------------------------- /static/img/txpool-error-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xPolygon/polygon-edge-docs/fe058c17d4038fa74e1ac22bae5cdb1cf9658916/static/img/txpool-error-2.png -------------------------------------------------------------------------------- /static/img/undraw_docusaurus_tree.svg: -------------------------------------------------------------------------------- 1 | docu_tree -------------------------------------------------------------------------------- /static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 9 | 12 | Your Site Title Here 13 | 14 | 15 | If you are not redirected automatically, follow this 16 | link. 
17 | 18 | 19 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/docusaurus/tsconfig.json", 3 | "include": [ 4 | "src/" 5 | ] 6 | } --------------------------------------------------------------------------------